google-cloud-video_intelligence-v1 0.2.2 → 0.4.0
- checksums.yaml +4 -4
- data/README.md +4 -0
- data/lib/google/cloud/video_intelligence/v1.rb +3 -0
- data/lib/google/cloud/video_intelligence/v1/version.rb +1 -1
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service.rb +1 -1
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/client.rb +21 -18
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/operations.rb +1 -1
- data/lib/google/cloud/videointelligence/v1/video_intelligence_pb.rb +26 -0
- data/lib/google/cloud/videointelligence/v1/video_intelligence_services_pb.rb +2 -2
- data/proto_docs/google/api/resource.rb +50 -14
- data/proto_docs/google/cloud/videointelligence/v1/video_intelligence.rb +125 -47
- metadata +5 -5
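To pick up the 0.4.0 release shown in this diff, a Gemfile entry along these lines (a minimal sketch; the exact version constraint is up to the consuming application) tracks the 0.4 series:

```ruby
# Gemfile: follow the 0.4.x series of the generated V1 client.
gem "google-cloud-video_intelligence-v1", "~> 0.4"
```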
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 361b6d8d213fc0984ebd3f4df7a01a213a7bfc8655e449ec0531413198219785
+  data.tar.gz: eeadbbfdae9a1a1b4027a4b16c2a287529e7c0362f180b9c4228139f81470adb
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c51a92191d80d777f9e62d8bd65fbfff49cdbf2f233b35d84e098bbd5e208ea8a942eccddd1f8dbdb7fe1243aefb27f8056c78633628601cbd6d31849f6e1964
+  data.tar.gz: b7a956274dfe8b5b31e6706f073ffdc3e52f32003ec8ca272a95ebd937f3acca8b960c62b734b74321030bd96c99d04395c60e2461e35002d0c21a10cd35a7eb
data/README.md
CHANGED
@@ -18,6 +18,7 @@ In order to use this library, you first need to go through the following steps:
 
 1. [Select or create a Cloud Platform project.](https://console.cloud.google.com/project)
 1. [Enable billing for your project.](https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project)
+1. [Enable the API.](https://console.cloud.google.com/apis/library/videointelligence.googleapis.com)
 1. {file:AUTHENTICATION.md Set up authentication.}
 
 ## Quick Start
@@ -33,6 +34,9 @@ response = client.annotate_video request
 View the [Client Library Documentation](https://googleapis.dev/ruby/google-cloud-video_intelligence-v1/latest)
 for class and method documentation.
 
+See also the [Product Documentation](https://cloud.google.com/video-intelligence)
+for general usage information.
+
 ## Enabling Logging
 
 To enable logging for this library, set the logger for the underlying [gRPC](https://github.com/grpc/grpc/tree/master/src/ruby) library.
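The README stops at pointing to the gRPC logger; a minimal sketch of one common way to wire it up (the `MyLogger` module name is illustrative, not part of the gem) is:

```ruby
require "logger"

# The gRPC Ruby runtime looks for a module-level GRPC.logger; defining one
# before gRPC installs its default logger routes gRPC output through it.
module MyLogger
  LOGGER = Logger.new $stderr, level: Logger::WARN
  def logger
    LOGGER
  end
end

module GRPC
  extend MyLogger
end
```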
@@ -27,7 +27,7 @@ module Google
 ##
 # Client for the VideoIntelligenceService service.
 #
-# Service that implements
+# Service that implements the Video Intelligence API.
 #
 class Client
   # @private
@@ -68,7 +68,7 @@ module Google
   initial_delay: 1.0,
   max_delay: 120.0,
   multiplier: 2.5,
-  retry_codes: [
+  retry_codes: [14, 4]
 }
 
 default_config
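The default policy above retries gRPC status codes 14 (UNAVAILABLE) and 4 (DEADLINE_EXCEEDED). It can be overridden through the generated client's `configure` hook; a minimal sketch, assuming the standard GAPIC configuration surface, with the extra retry code added purely for illustration:

```ruby
require "google/cloud/video_intelligence/v1"

Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.configure do |config|
  config.timeout = 600.0  # overall RPC timeout in seconds
  # Replace the default retry policy for annotate_video (8 = RESOURCE_EXHAUSTED, illustrative).
  config.rpcs.annotate_video.retry_policy = {
    initial_delay: 1.0, max_delay: 120.0, multiplier: 2.5, retry_codes: [14, 4, 8]
  }
end
```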
@@ -185,34 +185,37 @@ module Google
 #
 # @param input_uri [::String]
 #   Input video location. Currently, only
-#   [
-#   supported
+#   [Cloud Storage](https://cloud.google.com/storage/) URIs are
+#   supported. URIs must be specified in the following format:
 #   `gs://bucket-id/object-id` (other URI formats return
-#   [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-#   [Request
-#
-#   multiple videos
+#   [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+#   more information, see [Request
+#   URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+#   multiple videos, a video URI may include wildcards in the `object-id`.
+#   Supported wildcards: '*' to match 0 or more characters;
 #   '?' to match 1 character. If unset, the input video should be embedded
-#   in the request as `input_content`. If set, `input_content`
+#   in the request as `input_content`. If set, `input_content` must be unset.
 # @param input_content [::String]
 #   The video data bytes.
-#   If unset, the input video(s) should be specified via `input_uri`.
-#   If set, `input_uri`
+#   If unset, the input video(s) should be specified via the `input_uri`.
+#   If set, `input_uri` must be unset.
 # @param features [::Array<::Google::Cloud::VideoIntelligence::V1::Feature>]
 #   Required. Requested video annotation features.
 # @param video_context [::Google::Cloud::VideoIntelligence::V1::VideoContext, ::Hash]
 #   Additional video context and/or feature-specific parameters.
 # @param output_uri [::String]
 #   Optional. Location where the output (in JSON format) should be stored.
-#   Currently, only [
-#   URIs are supported
+#   Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+#   URIs are supported. These must be specified in the following format:
 #   `gs://bucket-id/object-id` (other URI formats return
-#   [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-#   [Request
+#   [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+#   more information, see [Request
+#   URIs](https://cloud.google.com/storage/docs/request-endpoints).
 # @param location_id [::String]
 #   Optional. Cloud region where annotation should take place. Supported cloud
-#   regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
-#   is specified,
+#   regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+#   region is specified, the region will be determined based on video file
+#   location.
 #
 # @yield [response, operation] Access the result along with the RPC operation
 # @yieldparam response [::Gapic::Operation]
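Taken together, these parameters map onto a call like the following minimal sketch (bucket and object names are placeholders):

```ruby
require "google/cloud/video_intelligence/v1"

client = Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new

# annotate_video returns a Gapic::Operation wrapping the long-running operation.
operation = client.annotate_video(
  input_uri:   "gs://my-bucket/my-video.mp4",          # Cloud Storage URI (or pass input_content instead)
  features:    [:LABEL_DETECTION, :PERSON_DETECTION],  # requested annotation features
  output_uri:  "gs://my-bucket/results.json",          # optional JSON output location
  location_id: "us-east1"                              # optional processing region
)

operation.wait_until_done!
results = operation.response.annotation_results.first
```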
@@ -339,7 +342,7 @@ module Google
 
 config_attr :endpoint, "videointelligence.googleapis.com", ::String
 config_attr :credentials, nil do |value|
-  allowed = [::String, ::Hash, ::Proc, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
+  allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
   allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
   allowed.any? { |klass| klass === value }
 end
@@ -475,7 +475,7 @@ module Google
 
 config_attr :endpoint, "videointelligence.googleapis.com", ::String
 config_attr :credentials, nil do |value|
-  allowed = [::String, ::Hash, ::Proc, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
+  allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
   allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
   allowed.any? { |klass| klass === value }
 end
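As the allowed-types check above shows, credentials may be supplied as a keyfile path String, a Hash, a Proc, and (new in this release) a Symbol, among others. A minimal sketch of the per-client override, with a placeholder keyfile path:

```ruby
require "google/cloud/video_intelligence/v1"

# Point this client at an explicit service-account keyfile instead of the
# ambient default credentials; the path below is a placeholder.
client = Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new do |config|
  config.credentials = "/path/to/service-account-keyfile.json"
end
```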
@@ -28,6 +28,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
   optional :face_detection_config, :message, 5, "google.cloud.videointelligence.v1.FaceDetectionConfig"
   optional :speech_transcription_config, :message, 6, "google.cloud.videointelligence.v1.SpeechTranscriptionConfig"
   optional :text_detection_config, :message, 8, "google.cloud.videointelligence.v1.TextDetectionConfig"
+  optional :person_detection_config, :message, 11, "google.cloud.videointelligence.v1.PersonDetectionConfig"
   optional :object_tracking_config, :message, 13, "google.cloud.videointelligence.v1.ObjectTrackingConfig"
 end
 add_message "google.cloud.videointelligence.v1.LabelDetectionConfig" do
@@ -46,6 +47,12 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 add_message "google.cloud.videointelligence.v1.FaceDetectionConfig" do
   optional :model, :string, 1
   optional :include_bounding_boxes, :bool, 2
+  optional :include_attributes, :bool, 5
+end
+add_message "google.cloud.videointelligence.v1.PersonDetectionConfig" do
+  optional :include_bounding_boxes, :bool, 1
+  optional :include_pose_landmarks, :bool, 2
+  optional :include_attributes, :bool, 3
 end
 add_message "google.cloud.videointelligence.v1.ExplicitContentDetectionConfig" do
   optional :model, :string, 1
@@ -76,6 +83,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
   repeated :category_entities, :message, 2, "google.cloud.videointelligence.v1.Entity"
   repeated :segments, :message, 3, "google.cloud.videointelligence.v1.LabelSegment"
   repeated :frames, :message, 4, "google.cloud.videointelligence.v1.LabelFrame"
+  optional :version, :string, 5
 end
 add_message "google.cloud.videointelligence.v1.ExplicitContentFrame" do
   optional :time_offset, :message, 1, "google.protobuf.Duration"
@@ -83,6 +91,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 end
 add_message "google.cloud.videointelligence.v1.ExplicitContentAnnotation" do
   repeated :frames, :message, 1, "google.cloud.videointelligence.v1.ExplicitContentFrame"
+  optional :version, :string, 2
 end
 add_message "google.cloud.videointelligence.v1.NormalizedBoundingBox" do
   optional :left, :float, 1
@@ -90,6 +99,15 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
   optional :right, :float, 3
   optional :bottom, :float, 4
 end
+add_message "google.cloud.videointelligence.v1.FaceDetectionAnnotation" do
+  repeated :tracks, :message, 3, "google.cloud.videointelligence.v1.Track"
+  optional :thumbnail, :bytes, 4
+  optional :version, :string, 5
+end
+add_message "google.cloud.videointelligence.v1.PersonDetectionAnnotation" do
+  repeated :tracks, :message, 1, "google.cloud.videointelligence.v1.Track"
+  optional :version, :string, 2
+end
 add_message "google.cloud.videointelligence.v1.FaceSegment" do
   optional :segment, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
 end
@@ -133,12 +151,14 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
   repeated :shot_presence_label_annotations, :message, 24, "google.cloud.videointelligence.v1.LabelAnnotation"
   repeated :frame_label_annotations, :message, 4, "google.cloud.videointelligence.v1.LabelAnnotation"
   repeated :face_annotations, :message, 5, "google.cloud.videointelligence.v1.FaceAnnotation"
+  repeated :face_detection_annotations, :message, 13, "google.cloud.videointelligence.v1.FaceDetectionAnnotation"
   repeated :shot_annotations, :message, 6, "google.cloud.videointelligence.v1.VideoSegment"
   optional :explicit_annotation, :message, 7, "google.cloud.videointelligence.v1.ExplicitContentAnnotation"
   repeated :speech_transcriptions, :message, 11, "google.cloud.videointelligence.v1.SpeechTranscription"
   repeated :text_annotations, :message, 12, "google.cloud.videointelligence.v1.TextAnnotation"
   repeated :object_annotations, :message, 14, "google.cloud.videointelligence.v1.ObjectTrackingAnnotation"
   repeated :logo_recognition_annotations, :message, 19, "google.cloud.videointelligence.v1.LogoRecognitionAnnotation"
+  repeated :person_detection_annotations, :message, 20, "google.cloud.videointelligence.v1.PersonDetectionAnnotation"
   optional :error, :message, 9, "google.rpc.Status"
 end
 add_message "google.cloud.videointelligence.v1.AnnotateVideoResponse" do
@@ -204,6 +224,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 add_message "google.cloud.videointelligence.v1.TextAnnotation" do
   optional :text, :string, 1
   repeated :segments, :message, 2, "google.cloud.videointelligence.v1.TextSegment"
+  optional :version, :string, 3
 end
 add_message "google.cloud.videointelligence.v1.ObjectTrackingFrame" do
   optional :normalized_bounding_box, :message, 1, "google.cloud.videointelligence.v1.NormalizedBoundingBox"
@@ -213,6 +234,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
   optional :entity, :message, 1, "google.cloud.videointelligence.v1.Entity"
   optional :confidence, :float, 4
   repeated :frames, :message, 2, "google.cloud.videointelligence.v1.ObjectTrackingFrame"
+  optional :version, :string, 6
   oneof :track_info do
     optional :segment, :message, 3, "google.cloud.videointelligence.v1.VideoSegment"
     optional :track_id, :int64, 5
@@ -233,6 +255,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
   value :TEXT_DETECTION, 7
   value :OBJECT_TRACKING, 9
   value :LOGO_RECOGNITION, 12
+  value :PERSON_DETECTION, 14
 end
 add_enum "google.cloud.videointelligence.v1.LabelDetectionMode" do
   value :LABEL_DETECTION_MODE_UNSPECIFIED, 0
@@ -261,6 +284,7 @@ module Google
 ShotChangeDetectionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.ShotChangeDetectionConfig").msgclass
 ObjectTrackingConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.ObjectTrackingConfig").msgclass
 FaceDetectionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.FaceDetectionConfig").msgclass
+PersonDetectionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.PersonDetectionConfig").msgclass
 ExplicitContentDetectionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.ExplicitContentDetectionConfig").msgclass
 TextDetectionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.TextDetectionConfig").msgclass
 VideoSegment = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.VideoSegment").msgclass
@@ -271,6 +295,8 @@ module Google
 ExplicitContentFrame = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.ExplicitContentFrame").msgclass
 ExplicitContentAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.ExplicitContentAnnotation").msgclass
 NormalizedBoundingBox = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.NormalizedBoundingBox").msgclass
+FaceDetectionAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.FaceDetectionAnnotation").msgclass
+PersonDetectionAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.PersonDetectionAnnotation").msgclass
 FaceSegment = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.FaceSegment").msgclass
 FaceFrame = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.FaceFrame").msgclass
 FaceAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.FaceAnnotation").msgclass
@@ -24,7 +24,7 @@ module Google
 module VideoIntelligence
   module V1
     module VideoIntelligenceService
-      # Service that implements
+      # Service that implements the Video Intelligence API.
       class Service
 
         include GRPC::GenericService
@@ -37,7 +37,7 @@ module Google
         # retrieved through the `google.longrunning.Operations` interface.
         # `Operation.metadata` contains `AnnotateVideoProgress` (progress).
         # `Operation.response` contains `AnnotateVideoResponse` (results).
-        rpc :AnnotateVideo, Google::Cloud::VideoIntelligence::V1::AnnotateVideoRequest, Google::Longrunning::Operation
+        rpc :AnnotateVideo, ::Google::Cloud::VideoIntelligence::V1::AnnotateVideoRequest, ::Google::Longrunning::Operation
       end
 
       Stub = Service.rpc_stub_class
@@ -43,12 +43,12 @@ module Google
 #
 # The ResourceDescriptor Yaml config will look like:
 #
-#
-#
-#
-#
-#
-#
+#   resources:
+#   - type: "pubsub.googleapis.com/Topic"
+#     name_descriptor:
+#       - pattern: "projects/{project}/topics/{topic}"
+#         parent_type: "cloudresourcemanager.googleapis.com/Project"
+#         parent_name_extractor: "projects/{project}"
 #
 # Sometimes, resources have multiple patterns, typically because they can
 # live under multiple parents.
@@ -183,15 +183,24 @@ module Google
 # }
 # @!attribute [rw] plural
 #   @return [::String]
-#     The plural name used in the resource name, such as
-#     the name of 'projects/\{project}'
-#
+#     The plural name used in the resource name and permission names, such as
+#     'projects' for the resource name of 'projects/\{project}' and the permission
+#     name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same
+#     concept of the `plural` field in k8s CRD spec
 #     https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+#
+#     Note: The plural form is required even for singleton resources. See
+#     https://aip.dev/156
 # @!attribute [rw] singular
 #   @return [::String]
 #     The same concept of the `singular` field in k8s CRD spec
 #     https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
 #     Such as "project" for the `resourcemanager.googleapis.com/Project` type.
+# @!attribute [rw] style
+#   @return [::Array<::Google::Api::ResourceDescriptor::Style>]
+#     Style flag(s) for this resource.
+#     These indicate that a resource is expected to conform to a given
+#     style. See the specific style flags for additional information.
 class ResourceDescriptor
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -211,6 +220,22 @@ module Google
 # that from being necessary once there are multiple patterns.)
 FUTURE_MULTI_PATTERN = 2
 end
+
+# A flag representing a specific style that a resource claims to conform to.
+module Style
+  # The unspecified value. Do not use.
+  STYLE_UNSPECIFIED = 0
+
+  # This resource is intended to be "declarative-friendly".
+  #
+  # Declarative-friendly resources must be more strictly consistent, and
+  # setting this to true communicates to tools that this resource should
+  # adhere to declarative-friendly expectations.
+  #
+  # Note: This is used by the API linter (linter.aip.dev) to enable
+  # additional checks.
+  DECLARATIVE_FRIENDLY = 1
+end
 end
 
 # Defines a proto annotation that describes a string field that refers to
@@ -226,6 +251,17 @@ module Google
 #       type: "pubsub.googleapis.com/Topic"
 #     }];
 # }
+#
+# Occasionally, a field may reference an arbitrary resource. In this case,
+# APIs use the special value * in their resource reference.
+#
+# Example:
+#
+#     message GetIamPolicyRequest {
+#       string resource = 2 [(google.api.resource_reference) = {
+#         type: "*"
+#       }];
+#     }
 # @!attribute [rw] child_type
 #   @return [::String]
 #     The resource type of a child collection that the annotated field
@@ -234,11 +270,11 @@ module Google
 #
 # Example:
 #
-#
-#
-#
-#
-#
+#     message ListLogEntriesRequest {
+#       string parent = 1 [(google.api.resource_reference) = {
+#         child_type: "logging.googleapis.com/LogEntry"
+#       };
+#     }
 class ResourceReference
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -25,20 +25,21 @@ module Google
 # @!attribute [rw] input_uri
 #   @return [::String]
 #     Input video location. Currently, only
-#     [
-#     supported
+#     [Cloud Storage](https://cloud.google.com/storage/) URIs are
+#     supported. URIs must be specified in the following format:
 #     `gs://bucket-id/object-id` (other URI formats return
-#     [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-#     [Request
-#
-#     multiple videos
+#     [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+#     more information, see [Request
+#     URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+#     multiple videos, a video URI may include wildcards in the `object-id`.
+#     Supported wildcards: '*' to match 0 or more characters;
 #     '?' to match 1 character. If unset, the input video should be embedded
-#     in the request as `input_content`. If set, `input_content`
+#     in the request as `input_content`. If set, `input_content` must be unset.
 # @!attribute [rw] input_content
 #   @return [::String]
 #     The video data bytes.
-#     If unset, the input video(s) should be specified via `input_uri`.
-#     If set, `input_uri`
+#     If unset, the input video(s) should be specified via the `input_uri`.
+#     If set, `input_uri` must be unset.
 # @!attribute [rw] features
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::Feature>]
 #     Required. Requested video annotation features.
@@ -48,16 +49,18 @@ module Google
 # @!attribute [rw] output_uri
 #   @return [::String]
 #     Optional. Location where the output (in JSON format) should be stored.
-#     Currently, only [
-#     URIs are supported
+#     Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+#     URIs are supported. These must be specified in the following format:
 #     `gs://bucket-id/object-id` (other URI formats return
-#     [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-#     [Request
+#     [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+#     more information, see [Request
+#     URIs](https://cloud.google.com/storage/docs/request-endpoints).
 # @!attribute [rw] location_id
 #   @return [::String]
 #     Optional. Cloud region where annotation should take place. Supported cloud
-#     regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
-#     is specified,
+#     regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+#     region is specified, the region will be determined based on video file
+#     location.
 class AnnotateVideoRequest
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -87,6 +90,9 @@ module Google
 # @!attribute [rw] text_detection_config
 #   @return [::Google::Cloud::VideoIntelligence::V1::TextDetectionConfig]
 #     Config for TEXT_DETECTION.
+# @!attribute [rw] person_detection_config
+#   @return [::Google::Cloud::VideoIntelligence::V1::PersonDetectionConfig]
+#     Config for PERSON_DETECTION.
 # @!attribute [rw] object_tracking_config
 #   @return [::Google::Cloud::VideoIntelligence::V1::ObjectTrackingConfig]
 #     Config for OBJECT_TRACKING.
@@ -103,9 +109,9 @@ module Google
 #     If unspecified, defaults to `SHOT_MODE`.
 # @!attribute [rw] stationary_camera
 #   @return [::Boolean]
-#     Whether the video has been shot from a stationary (i.e
-#     When set to true, might improve detection accuracy for moving
-#     Should be used with `SHOT_AND_FRAME_MODE` enabled.
+#     Whether the video has been shot from a stationary (i.e., non-moving)
+#     camera. When set to true, might improve detection accuracy for moving
+#     objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
 # @!attribute [rw] model
 #   @return [::String]
 #     Model to use for label detection.
@@ -117,15 +123,15 @@ module Google
 #     frame-level detection. If not set, it is set to 0.4 by default. The valid
 #     range for this threshold is [0.1, 0.9]. Any value set outside of this
 #     range will be clipped.
-#     Note:
+#     Note: For best results, follow the default threshold. We will update
 #     the default threshold everytime when we release a new model.
 # @!attribute [rw] video_confidence_threshold
 #   @return [::Float]
 #     The confidence threshold we perform filtering on the labels from
-#     video-level and shot-level detections. If not set, it
+#     video-level and shot-level detections. If not set, it's set to 0.3 by
 #     default. The valid range for this threshold is [0.1, 0.9]. Any value set
 #     outside of this range will be clipped.
-#     Note:
+#     Note: For best results, follow the default threshold. We will update
 #     the default threshold everytime when we release a new model.
 class LabelDetectionConfig
   include ::Google::Protobuf::MessageExts
@@ -162,12 +168,36 @@ module Google
 #     "builtin/latest".
 # @!attribute [rw] include_bounding_boxes
 #   @return [::Boolean]
-#     Whether bounding boxes
+#     Whether bounding boxes are included in the face annotation output.
+# @!attribute [rw] include_attributes
+#   @return [::Boolean]
+#     Whether to enable face attributes detection, such as glasses, dark_glasses,
+#     mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
 class FaceDetectionConfig
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
 end
 
+# Config for PERSON_DETECTION.
+# @!attribute [rw] include_bounding_boxes
+#   @return [::Boolean]
+#     Whether bounding boxes are included in the person detection annotation
+#     output.
+# @!attribute [rw] include_pose_landmarks
+#   @return [::Boolean]
+#     Whether to enable pose landmarks detection. Ignored if
+#     'include_bounding_boxes' is set to false.
+# @!attribute [rw] include_attributes
+#   @return [::Boolean]
+#     Whether to enable person attributes detection, such as cloth color (black,
+#     blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+#     etc.
+#     Ignored if 'include_bounding_boxes' is set to false.
+class PersonDetectionConfig
+  include ::Google::Protobuf::MessageExts
+  extend ::Google::Protobuf::MessageExts::ClassMethods
+end
+
 # Config for EXPLICIT_CONTENT_DETECTION.
 # @!attribute [rw] model
 #   @return [::String]
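A minimal sketch of wiring the new `PersonDetectionConfig` into a request through `video_context` (field names follow the message definitions in this diff; the input URI is a placeholder):

```ruby
require "google/cloud/video_intelligence/v1"

client = Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new

video_context = {
  person_detection_config: {
    include_bounding_boxes: true,   # per the docs above, required for the flags below to take effect
    include_pose_landmarks: true,
    include_attributes:     false
  }
}

operation = client.annotate_video input_uri:     "gs://my-bucket/people.mp4",
                                  features:      [:PERSON_DETECTION],
                                  video_context: video_context
operation.wait_until_done!
tracks = operation.response.annotation_results.first.person_detection_annotations
```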
@@ -244,7 +274,7 @@ module Google
 #     API](https://developers.google.com/knowledge-graph/).
 # @!attribute [rw] description
 #   @return [::String]
-#     Textual description, e.g
+#     Textual description, e.g., `Fixed-gear bicycle`.
 # @!attribute [rw] language_code
 #   @return [::String]
 #     Language code for `description` in BCP-47 format.
@@ -260,15 +290,18 @@ module Google
 # @!attribute [rw] category_entities
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::Entity>]
 #     Common categories for the detected entity.
-#
-#     cases there might be more than one categories e.g
-#     a `pet`.
+#     For example, when the label is `Terrier`, the category is likely `dog`. And
+#     in some cases there might be more than one categories e.g., `Terrier` could
+#     also be a `pet`.
 # @!attribute [rw] segments
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelSegment>]
 #     All video segments where a label was detected.
 # @!attribute [rw] frames
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelFrame>]
 #     All video frames where a label was detected.
+# @!attribute [rw] version
+#   @return [::String]
+#     Feature version.
 class LabelAnnotation
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -293,6 +326,9 @@ module Google
 # @!attribute [rw] frames
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::ExplicitContentFrame>]
 #     All video frames where explicit content was detected.
+# @!attribute [rw] version
+#   @return [::String]
+#     Feature version.
 class ExplicitContentAnnotation
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -318,6 +354,33 @@ module Google
   extend ::Google::Protobuf::MessageExts::ClassMethods
 end
 
+# Face detection annotation.
+# @!attribute [rw] tracks
+#   @return [::Array<::Google::Cloud::VideoIntelligence::V1::Track>]
+#     The face tracks with attributes.
+# @!attribute [rw] thumbnail
+#   @return [::String]
+#     The thumbnail of a person's face.
+# @!attribute [rw] version
+#   @return [::String]
+#     Feature version.
+class FaceDetectionAnnotation
+  include ::Google::Protobuf::MessageExts
+  extend ::Google::Protobuf::MessageExts::ClassMethods
+end
+
+# Person detection annotation per video.
+# @!attribute [rw] tracks
+#   @return [::Array<::Google::Cloud::VideoIntelligence::V1::Track>]
+#     The detected tracks of a person.
+# @!attribute [rw] version
+#   @return [::String]
+#     Feature version.
+class PersonDetectionAnnotation
+  include ::Google::Protobuf::MessageExts
+  extend ::Google::Protobuf::MessageExts::ClassMethods
+end
+
 # Video segment level annotation results for face detection.
 # @!attribute [rw] segment
 #   @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
@@ -327,7 +390,7 @@ module Google
   extend ::Google::Protobuf::MessageExts::ClassMethods
 end
 
-#
+# Deprecated. No effect.
 # @!attribute [rw] normalized_bounding_boxes
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox>]
 #     Normalized Bounding boxes in a frame.
@@ -342,7 +405,7 @@ module Google
   extend ::Google::Protobuf::MessageExts::ClassMethods
 end
 
-#
+# Deprecated. No effect.
 # @!attribute [rw] thumbnail
 #   @return [::String]
 #     Thumbnail of a representative face view (in JPEG format).
@@ -399,7 +462,7 @@ module Google
 # A generic detected attribute represented by name in string format.
 # @!attribute [rw] name
 #   @return [::String]
-#     The name of the attribute,
+#     The name of the attribute, for example, glasses, dark_glasses, mouth_open.
 #     A full list of supported type names will be provided in the document.
 # @!attribute [rw] confidence
 #   @return [::Float]
@@ -417,7 +480,7 @@ module Google
 #     location.
 # @!attribute [rw] name
 #   @return [::String]
-#     The name of this landmark,
+#     The name of this landmark, for example, left_hand, right_shoulder.
 # @!attribute [rw] point
 #   @return [::Google::Cloud::VideoIntelligence::V1::NormalizedVertex]
 #     The 2D point of the detected landmark using the normalized image
@@ -434,17 +497,17 @@ module Google
 # @!attribute [rw] input_uri
 #   @return [::String]
 #     Video file location in
-#     [
+#     [Cloud Storage](https://cloud.google.com/storage/).
 # @!attribute [rw] segment
 #   @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
 #     Video segment on which the annotation is run.
 # @!attribute [rw] segment_label_annotations
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
-#     Topical label annotations on video level or user
+#     Topical label annotations on video level or user-specified segment level.
 #     There is exactly one element for each unique label.
 # @!attribute [rw] segment_presence_label_annotations
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
-#     Presence label annotations on video level or user
+#     Presence label annotations on video level or user-specified segment level.
 #     There is exactly one element for each unique label. Compared to the
 #     existing topical `segment_label_annotations`, this field presents more
 #     fine-grained, segment-level labels detected in video content and is made
@@ -467,7 +530,10 @@ module Google
 #     There is exactly one element for each unique label.
 # @!attribute [rw] face_annotations
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::FaceAnnotation>]
-#
+#     Deprecated. Please use `face_detection_annotations` instead.
+# @!attribute [rw] face_detection_annotations
+#   @return [::Array<::Google::Cloud::VideoIntelligence::V1::FaceDetectionAnnotation>]
+#     Face detection annotations.
 # @!attribute [rw] shot_annotations
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::VideoSegment>]
 #     Shot annotations. Each shot is represented as a video segment.
@@ -488,6 +554,9 @@ module Google
 # @!attribute [rw] logo_recognition_annotations
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::LogoRecognitionAnnotation>]
 #     Annotations for list of logos detected, tracked and recognized in video.
+# @!attribute [rw] person_detection_annotations
+#   @return [::Array<::Google::Cloud::VideoIntelligence::V1::PersonDetectionAnnotation>]
+#     Person detection annotations.
 # @!attribute [rw] error
 #   @return [::Google::Rpc::Status]
 #     If set, indicates an error. Note that for a single `AnnotateVideoRequest`
@@ -512,7 +581,7 @@ module Google
 # @!attribute [rw] input_uri
 #   @return [::String]
 #     Video file location in
-#     [
+#     [Cloud Storage](https://cloud.google.com/storage/).
 # @!attribute [rw] progress_percent
 #   @return [::Integer]
 #     Approximate percentage processed thus far. Guaranteed to be
@@ -526,11 +595,11 @@ module Google
 # @!attribute [rw] feature
 #   @return [::Google::Cloud::VideoIntelligence::V1::Feature]
 #     Specifies which feature is being tracked if the request contains more than
-#     one
+#     one feature.
 # @!attribute [rw] segment
 #   @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
 #     Specifies which segment is being tracked if the request contains more than
-#     one
+#     one segment.
 class VideoAnnotationProgress
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -589,14 +658,14 @@ module Google
 #     the top alternative of the recognition result using a speaker_tag provided
 #     in the WordInfo.
 #     Note: When this is true, we send all the words from the beginning of the
-#     audio for the top alternative in every consecutive
+#     audio for the top alternative in every consecutive response.
 #     This is done in order to improve our speaker tags as our models learn to
 #     identify the speakers in the conversation over time.
 # @!attribute [rw] diarization_speaker_count
 #   @return [::Integer]
-#     Optional. If set, specifies the estimated number of speakers in the
-#     If not set, defaults to '2'.
-#
+#     Optional. If set, specifies the estimated number of speakers in the
+#     conversation. If not set, defaults to '2'. Ignored unless
+#     enable_speaker_diarization is set to true.
 # @!attribute [rw] enable_word_confidence
 #   @return [::Boolean]
 #     Optional. If `true`, the top result includes a list of words and the
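A minimal sketch of a transcription request using the diarization fields documented above (the `language_code` field belongs to `SpeechTranscriptionConfig` but is not shown in this hunk, so treat it as an assumption):

```ruby
require "google/cloud/video_intelligence/v1"

client = Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new

speech_config = {
  language_code:              "en-US",  # assumed required field of SpeechTranscriptionConfig
  enable_speaker_diarization: true,
  diarization_speaker_count:  2,        # estimated number of speakers; per the docs, defaults to 2
  enable_word_confidence:     true
}

operation = client.annotate_video input_uri:     "gs://my-bucket/interview.mp4",
                                  features:      [:SPEECH_TRANSCRIPTION],
                                  video_context: { speech_transcription_config: speech_config }
operation.wait_until_done!
transcripts = operation.response.annotation_results.first.speech_transcriptions
```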
@@ -631,9 +700,9 @@ module Google
 #     ranked by the recognizer.
 # @!attribute [r] language_code
 #   @return [::String]
-#     Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
-#     the language in this result. This language code was
-#     most likelihood of being spoken in the audio.
+#     Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+#     language tag of the language in this result. This language code was
+#     detected to have the most likelihood of being spoken in the audio.
 class SpeechTranscription
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -654,8 +723,8 @@ module Google
 # @!attribute [r] words
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::WordInfo>]
 #     Output only. A list of word-specific information for each recognized word.
-#     Note: When `enable_speaker_diarization` is true, you will see all
-#     from the beginning of the audio.
+#     Note: When `enable_speaker_diarization` is set to true, you will see all
+#     the words from the beginning of the audio.
 class SpeechRecognitionAlternative
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -776,6 +845,9 @@ module Google
 # @!attribute [rw] segments
 #   @return [::Array<::Google::Cloud::VideoIntelligence::V1::TextSegment>]
 #     All video segments where OCR detected text appears.
+# @!attribute [rw] version
+#   @return [::String]
+#     Feature version.
 class TextAnnotation
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -819,6 +891,9 @@ module Google
 #     Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
 #     messages in frames.
 #     Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+# @!attribute [rw] version
+#   @return [::String]
+#     Feature version.
 class ObjectTrackingAnnotation
   include ::Google::Protobuf::MessageExts
   extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -856,7 +931,7 @@ module Google
 # Explicit content detection.
 EXPLICIT_CONTENT_DETECTION = 3
 
-# Human face detection
+# Human face detection.
 FACE_DETECTION = 4
 
 # Speech transcription.
@@ -870,6 +945,9 @@ module Google
 
 # Logo detection, tracking, and recognition.
 LOGO_RECOGNITION = 12
+
+# Person detection.
+PERSON_DETECTION = 14
 end
 
 # Label detection mode.
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: google-cloud-video_intelligence-v1
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.4.0
 platform: ruby
 authors:
 - Google LLC
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2020-
+date: 2020-12-08 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: gapic-common
@@ -16,14 +16,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '0.
+        version: '0.3'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '0.
+        version: '0.3'
 - !ruby/object:Gem::Dependency
   name: google-cloud-errors
   requirement: !ruby/object:Gem::Requirement
@@ -200,7 +200,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.1.
+rubygems_version: 3.1.4
 signing_key:
 specification_version: 4
 summary: API Client library for the Cloud Video Intelligence V1 API