google-cloud-video_intelligence-v1 0.2.1 → 0.3.0
- checksums.yaml +4 -4
- data/README.md +4 -0
- data/lib/google/cloud/video_intelligence/v1/version.rb +1 -1
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service.rb +1 -1
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/client.rb +22 -19
- data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/operations.rb +2 -2
- data/lib/google/cloud/videointelligence/v1/video_intelligence_pb.rb +24 -0
- data/lib/google/cloud/videointelligence/v1/video_intelligence_services_pb.rb +2 -2
- data/proto_docs/google/api/resource.rb +50 -14
- data/proto_docs/google/cloud/videointelligence/v1/video_intelligence.rb +119 -47
- metadata +5 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: db1635bb6e368ea6d1276625f5dabb0395facab6b045831e86f30df6b5b9187f
+  data.tar.gz: 2bd31f31ff43ceaf662157861aa27a0fa6bc69ee2639bb01a8b14bd257b3b2dd
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ee16cfb10f35ef67cb91517a72e1d4407b7b012e5586c80a25914a3fcf541dd1a3c3aee138c19affd970087bbc11bd525664b13cee3bc5637e45f2c352a6d282
+  data.tar.gz: d7df1c02024e882ed668773813b9f6734c63277c968c8aa3894f5157934d3fea676390bcec7d0a40ca7d53eb3e3c79d0c822d7a69b9e17e257bf5a0f70d3b521
data/README.md
CHANGED
@@ -18,6 +18,7 @@ In order to use this library, you first need to go through the following steps:
 
 1. [Select or create a Cloud Platform project.](https://console.cloud.google.com/project)
 1. [Enable billing for your project.](https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project)
+1. [Enable the API.](https://console.cloud.google.com/apis/library/videointelligence.googleapis.com)
 1. {file:AUTHENTICATION.md Set up authentication.}
 
 ## Quick Start
@@ -33,6 +34,9 @@ response = client.annotate_video request
 View the [Client Library Documentation](https://googleapis.dev/ruby/google-cloud-video_intelligence-v1/latest)
 for class and method documentation.
 
+See also the [Product Documentation](https://cloud.google.com/video-intelligence)
+for general usage information.
+
 ## Enabling Logging
 
 To enable logging for this library, set the logger for the underlying [gRPC](https://github.com/grpc/grpc/tree/master/src/ruby) library.
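The Quick Start referenced in the hunk header above boils down to a few lines. A minimal sketch, assuming the generated client API; the bucket path and feature list are placeholders, not taken from this diff:

```ruby
require "google/cloud/video_intelligence/v1"

# Create a client and request label detection for a video in Cloud Storage.
client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new

request = {
  input_uri: "gs://my-bucket/my-video.mp4",   # placeholder object
  features:  [:LABEL_DETECTION]
}

operation = client.annotate_video request
operation.wait_until_done!      # annotate_video is a long-running operation
response = operation.response   # AnnotateVideoResponse with annotation_results
```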
data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/client.rb
CHANGED
@@ -27,7 +27,7 @@ module Google
 ##
 # Client for the VideoIntelligenceService service.
 #
-# Service that implements
+# Service that implements the Video Intelligence API.
 #
 class Client
 # @private
@@ -68,7 +68,7 @@ module Google
 initial_delay: 1.0,
 max_delay: 120.0,
 multiplier: 2.5,
-retry_codes: [
+retry_codes: [14, 4]
 }
 
 default_config
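The new defaults retry on gRPC codes 14 (UNAVAILABLE) and 4 (DEADLINE_EXCEEDED). If different behavior is wanted, the per-RPC retry policy can be overridden through the client configuration. A hedged sketch, assuming the standard GAPIC configuration surface; the values are illustrative only:

```ruby
require "google/cloud/video_intelligence/v1"

::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.configure do |config|
  # Override the generated defaults shown in the hunk above.
  config.rpcs.annotate_video.retry_policy = {
    initial_delay: 2.0,   # seconds before the first retry
    max_delay:     60.0,  # cap on the backoff delay
    multiplier:    1.5,   # backoff growth factor
    retry_codes:   [14]   # retry only on UNAVAILABLE
  }
end
```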
@@ -185,34 +185,37 @@ module Google
 #
 # @param input_uri [::String]
 # Input video location. Currently, only
-# [
-# supported
+# [Cloud Storage](https://cloud.google.com/storage/) URIs are
+# supported. URIs must be specified in the following format:
 # `gs://bucket-id/object-id` (other URI formats return
-# [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-# [Request
-#
-# multiple videos
+# [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+# more information, see [Request
+# URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+# multiple videos, a video URI may include wildcards in the `object-id`.
+# Supported wildcards: '*' to match 0 or more characters;
 # '?' to match 1 character. If unset, the input video should be embedded
-# in the request as `input_content`. If set, `input_content`
+# in the request as `input_content`. If set, `input_content` must be unset.
 # @param input_content [::String]
 # The video data bytes.
-# If unset, the input video(s) should be specified via `input_uri`.
-# If set, `input_uri`
+# If unset, the input video(s) should be specified via the `input_uri`.
+# If set, `input_uri` must be unset.
 # @param features [::Array<::Google::Cloud::VideoIntelligence::V1::Feature>]
 # Required. Requested video annotation features.
 # @param video_context [::Google::Cloud::VideoIntelligence::V1::VideoContext, ::Hash]
 # Additional video context and/or feature-specific parameters.
 # @param output_uri [::String]
 # Optional. Location where the output (in JSON format) should be stored.
-# Currently, only [
-# URIs are supported
+# Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+# URIs are supported. These must be specified in the following format:
 # `gs://bucket-id/object-id` (other URI formats return
-# [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-# [Request
+# [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+# more information, see [Request
+# URIs](https://cloud.google.com/storage/docs/request-endpoints).
 # @param location_id [::String]
 # Optional. Cloud region where annotation should take place. Supported cloud
-# regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
-# is specified,
+# regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+# region is specified, the region will be determined based on video file
+# location.
 #
 # @yield [response, operation] Access the result along with the RPC operation
 # @yieldparam response [::Gapic::Operation]
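Taken together, the documented parameters map onto a request like the following hedged sketch; the URIs and region are placeholders, not values from this diff:

```ruby
require "google/cloud/video_intelligence/v1"

client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new

# input_uri and output_uri must be Cloud Storage URIs; input_content would be
# used instead of input_uri for inline bytes (the two are mutually exclusive).
operation = client.annotate_video(
  input_uri:   "gs://my-bucket/videos/*.mp4",        # wildcard selects multiple videos
  features:    [:LABEL_DETECTION, :SHOT_CHANGE_DETECTION],
  output_uri:  "gs://my-bucket/output/annotations.json",
  location_id: "us-east1"
)

operation.wait_until_done!
operation.response.annotation_results.each do |result|
  puts "annotated #{result.input_uri}"
end
```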
@@ -339,7 +342,7 @@ module Google
 
 config_attr :endpoint, "videointelligence.googleapis.com", ::String
 config_attr :credentials, nil do |value|
-allowed = [::String, ::Hash, ::Proc, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
+allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
 allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
 allowed.any? { |klass| klass === value }
 end
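The only change here is that a ::Symbol is now accepted as a credentials value alongside the existing types. A hedged configuration sketch using the long-standing String/keyfile form; the path is a placeholder:

```ruby
require "google/cloud/video_intelligence/v1"

client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new do |config|
  # A String path to a service-account keyfile has always been accepted;
  # as of this change a Symbol also passes the validation block above.
  config.credentials = "/path/to/keyfile.json"
  config.timeout     = 120.0
end
```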
@@ -367,7 +370,7 @@ module Google
 def rpcs
 @rpcs ||= begin
 parent_rpcs = nil
-parent_rpcs = @parent_config.rpcs if @parent_config&.respond_to?
+parent_rpcs = @parent_config.rpcs if defined?(@parent_config) && @parent_config&.respond_to?(:rpcs)
 Rpcs.new parent_rpcs
 end
 end
data/lib/google/cloud/video_intelligence/v1/video_intelligence_service/operations.rb
CHANGED
@@ -475,7 +475,7 @@ module Google
 
 config_attr :endpoint, "videointelligence.googleapis.com", ::String
 config_attr :credentials, nil do |value|
-allowed = [::String, ::Hash, ::Proc, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
+allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
 allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
 allowed.any? { |klass| klass === value }
 end
@@ -503,7 +503,7 @@ module Google
 def rpcs
 @rpcs ||= begin
 parent_rpcs = nil
-parent_rpcs = @parent_config.rpcs if @parent_config&.respond_to?
+parent_rpcs = @parent_config.rpcs if defined?(@parent_config) && @parent_config&.respond_to?(:rpcs)
 Rpcs.new parent_rpcs
 end
 end
data/lib/google/cloud/videointelligence/v1/video_intelligence_pb.rb
CHANGED
@@ -28,6 +28,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 optional :face_detection_config, :message, 5, "google.cloud.videointelligence.v1.FaceDetectionConfig"
 optional :speech_transcription_config, :message, 6, "google.cloud.videointelligence.v1.SpeechTranscriptionConfig"
 optional :text_detection_config, :message, 8, "google.cloud.videointelligence.v1.TextDetectionConfig"
+optional :person_detection_config, :message, 11, "google.cloud.videointelligence.v1.PersonDetectionConfig"
 optional :object_tracking_config, :message, 13, "google.cloud.videointelligence.v1.ObjectTrackingConfig"
 end
 add_message "google.cloud.videointelligence.v1.LabelDetectionConfig" do
@@ -46,6 +47,12 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 add_message "google.cloud.videointelligence.v1.FaceDetectionConfig" do
 optional :model, :string, 1
 optional :include_bounding_boxes, :bool, 2
+optional :include_attributes, :bool, 5
+end
+add_message "google.cloud.videointelligence.v1.PersonDetectionConfig" do
+optional :include_bounding_boxes, :bool, 1
+optional :include_pose_landmarks, :bool, 2
+optional :include_attributes, :bool, 3
 end
 add_message "google.cloud.videointelligence.v1.ExplicitContentDetectionConfig" do
 optional :model, :string, 1
@@ -76,6 +83,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 repeated :category_entities, :message, 2, "google.cloud.videointelligence.v1.Entity"
 repeated :segments, :message, 3, "google.cloud.videointelligence.v1.LabelSegment"
 repeated :frames, :message, 4, "google.cloud.videointelligence.v1.LabelFrame"
+optional :version, :string, 5
 end
 add_message "google.cloud.videointelligence.v1.ExplicitContentFrame" do
 optional :time_offset, :message, 1, "google.protobuf.Duration"
@@ -83,6 +91,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 end
 add_message "google.cloud.videointelligence.v1.ExplicitContentAnnotation" do
 repeated :frames, :message, 1, "google.cloud.videointelligence.v1.ExplicitContentFrame"
+optional :version, :string, 2
 end
 add_message "google.cloud.videointelligence.v1.NormalizedBoundingBox" do
 optional :left, :float, 1
@@ -90,6 +99,13 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 optional :right, :float, 3
 optional :bottom, :float, 4
 end
+add_message "google.cloud.videointelligence.v1.FaceDetectionAnnotation" do
+optional :version, :string, 5
+end
+add_message "google.cloud.videointelligence.v1.PersonDetectionAnnotation" do
+repeated :tracks, :message, 1, "google.cloud.videointelligence.v1.Track"
+optional :version, :string, 2
+end
 add_message "google.cloud.videointelligence.v1.FaceSegment" do
 optional :segment, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
 end
@@ -133,12 +149,14 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 repeated :shot_presence_label_annotations, :message, 24, "google.cloud.videointelligence.v1.LabelAnnotation"
 repeated :frame_label_annotations, :message, 4, "google.cloud.videointelligence.v1.LabelAnnotation"
 repeated :face_annotations, :message, 5, "google.cloud.videointelligence.v1.FaceAnnotation"
+repeated :face_detection_annotations, :message, 13, "google.cloud.videointelligence.v1.FaceDetectionAnnotation"
 repeated :shot_annotations, :message, 6, "google.cloud.videointelligence.v1.VideoSegment"
 optional :explicit_annotation, :message, 7, "google.cloud.videointelligence.v1.ExplicitContentAnnotation"
 repeated :speech_transcriptions, :message, 11, "google.cloud.videointelligence.v1.SpeechTranscription"
 repeated :text_annotations, :message, 12, "google.cloud.videointelligence.v1.TextAnnotation"
 repeated :object_annotations, :message, 14, "google.cloud.videointelligence.v1.ObjectTrackingAnnotation"
 repeated :logo_recognition_annotations, :message, 19, "google.cloud.videointelligence.v1.LogoRecognitionAnnotation"
+repeated :person_detection_annotations, :message, 20, "google.cloud.videointelligence.v1.PersonDetectionAnnotation"
 optional :error, :message, 9, "google.rpc.Status"
 end
 add_message "google.cloud.videointelligence.v1.AnnotateVideoResponse" do
@@ -204,6 +222,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 add_message "google.cloud.videointelligence.v1.TextAnnotation" do
 optional :text, :string, 1
 repeated :segments, :message, 2, "google.cloud.videointelligence.v1.TextSegment"
+optional :version, :string, 3
 end
 add_message "google.cloud.videointelligence.v1.ObjectTrackingFrame" do
 optional :normalized_bounding_box, :message, 1, "google.cloud.videointelligence.v1.NormalizedBoundingBox"
@@ -213,6 +232,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 optional :entity, :message, 1, "google.cloud.videointelligence.v1.Entity"
 optional :confidence, :float, 4
 repeated :frames, :message, 2, "google.cloud.videointelligence.v1.ObjectTrackingFrame"
+optional :version, :string, 6
 oneof :track_info do
 optional :segment, :message, 3, "google.cloud.videointelligence.v1.VideoSegment"
 optional :track_id, :int64, 5
@@ -233,6 +253,7 @@ Google::Protobuf::DescriptorPool.generated_pool.build do
 value :TEXT_DETECTION, 7
 value :OBJECT_TRACKING, 9
 value :LOGO_RECOGNITION, 12
+value :PERSON_DETECTION, 14
 end
 add_enum "google.cloud.videointelligence.v1.LabelDetectionMode" do
 value :LABEL_DETECTION_MODE_UNSPECIFIED, 0
@@ -261,6 +282,7 @@ module Google
 ShotChangeDetectionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.ShotChangeDetectionConfig").msgclass
 ObjectTrackingConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.ObjectTrackingConfig").msgclass
 FaceDetectionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.FaceDetectionConfig").msgclass
+PersonDetectionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.PersonDetectionConfig").msgclass
 ExplicitContentDetectionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.ExplicitContentDetectionConfig").msgclass
 TextDetectionConfig = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.TextDetectionConfig").msgclass
 VideoSegment = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.VideoSegment").msgclass
@@ -271,6 +293,8 @@ module Google
 ExplicitContentFrame = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.ExplicitContentFrame").msgclass
 ExplicitContentAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.ExplicitContentAnnotation").msgclass
 NormalizedBoundingBox = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.NormalizedBoundingBox").msgclass
+FaceDetectionAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.FaceDetectionAnnotation").msgclass
+PersonDetectionAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.PersonDetectionAnnotation").msgclass
 FaceSegment = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.FaceSegment").msgclass
 FaceFrame = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.FaceFrame").msgclass
 FaceAnnotation = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.cloud.videointelligence.v1.FaceAnnotation").msgclass
data/lib/google/cloud/videointelligence/v1/video_intelligence_services_pb.rb
CHANGED
@@ -24,7 +24,7 @@ module Google
 module VideoIntelligence
 module V1
 module VideoIntelligenceService
-# Service that implements
+# Service that implements the Video Intelligence API.
 class Service
 
 include GRPC::GenericService
@@ -37,7 +37,7 @@ module Google
 # retrieved through the `google.longrunning.Operations` interface.
 # `Operation.metadata` contains `AnnotateVideoProgress` (progress).
 # `Operation.response` contains `AnnotateVideoResponse` (results).
-rpc :AnnotateVideo, Google::Cloud::VideoIntelligence::V1::AnnotateVideoRequest, Google::Longrunning::Operation
+rpc :AnnotateVideo, ::Google::Cloud::VideoIntelligence::V1::AnnotateVideoRequest, ::Google::Longrunning::Operation
 end
 
 Stub = Service.rpc_stub_class
data/proto_docs/google/api/resource.rb
CHANGED
@@ -43,12 +43,12 @@ module Google
 #
 # The ResourceDescriptor Yaml config will look like:
 #
-#
-#
-#
-#
-#
-#
+# resources:
+# - type: "pubsub.googleapis.com/Topic"
+# name_descriptor:
+# - pattern: "projects/{project}/topics/{topic}"
+# parent_type: "cloudresourcemanager.googleapis.com/Project"
+# parent_name_extractor: "projects/{project}"
 #
 # Sometimes, resources have multiple patterns, typically because they can
 # live under multiple parents.
@@ -183,15 +183,24 @@ module Google
 # }
 # @!attribute [rw] plural
 # @return [::String]
-# The plural name used in the resource name, such as
-# the name of 'projects/\\{project}'
-#
+# The plural name used in the resource name and permission names, such as
+# 'projects' for the resource name of 'projects/\\{project}' and the permission
+# name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same
+# concept of the `plural` field in k8s CRD spec
 # https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+#
+# Note: The plural form is required even for singleton resources. See
+# https://aip.dev/156
 # @!attribute [rw] singular
 # @return [::String]
 # The same concept of the `singular` field in k8s CRD spec
 # https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
 # Such as "project" for the `resourcemanager.googleapis.com/Project` type.
+# @!attribute [rw] style
+# @return [::Array<::Google::Api::ResourceDescriptor::Style>]
+# Style flag(s) for this resource.
+# These indicate that a resource is expected to conform to a given
+# style. See the specific style flags for additional information.
 class ResourceDescriptor
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -211,6 +220,22 @@ module Google
 # that from being necessary once there are multiple patterns.)
 FUTURE_MULTI_PATTERN = 2
 end
+
+# A flag representing a specific style that a resource claims to conform to.
+module Style
+# The unspecified value. Do not use.
+STYLE_UNSPECIFIED = 0
+
+# This resource is intended to be "declarative-friendly".
+#
+# Declarative-friendly resources must be more strictly consistent, and
+# setting this to true communicates to tools that this resource should
+# adhere to declarative-friendly expectations.
+#
+# Note: This is used by the API linter (linter.aip.dev) to enable
+# additional checks.
+DECLARATIVE_FRIENDLY = 1
+end
 end
 
 # Defines a proto annotation that describes a string field that refers to
@@ -226,6 +251,17 @@ module Google
 # type: "pubsub.googleapis.com/Topic"
 # }];
 # }
+#
+# Occasionally, a field may reference an arbitrary resource. In this case,
+# APIs use the special value * in their resource reference.
+#
+# Example:
+#
+# message GetIamPolicyRequest {
+# string resource = 2 [(google.api.resource_reference) = {
+# type: "*"
+# }];
+# }
 # @!attribute [rw] child_type
 # @return [::String]
 # The resource type of a child collection that the annotated field
@@ -234,11 +270,11 @@ module Google
 #
 # Example:
 #
-#
-#
-#
-#
-#
+# message ListLogEntriesRequest {
+# string parent = 1 [(google.api.resource_reference) = {
+# child_type: "logging.googleapis.com/LogEntry"
+# };
+# }
 class ResourceReference
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
data/proto_docs/google/cloud/videointelligence/v1/video_intelligence.rb
CHANGED
@@ -25,20 +25,21 @@ module Google
 # @!attribute [rw] input_uri
 # @return [::String]
 # Input video location. Currently, only
-# [
-# supported
+# [Cloud Storage](https://cloud.google.com/storage/) URIs are
+# supported. URIs must be specified in the following format:
 # `gs://bucket-id/object-id` (other URI formats return
-# [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-# [Request
-#
-# multiple videos
+# [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+# more information, see [Request
+# URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
+# multiple videos, a video URI may include wildcards in the `object-id`.
+# Supported wildcards: '*' to match 0 or more characters;
 # '?' to match 1 character. If unset, the input video should be embedded
-# in the request as `input_content`. If set, `input_content`
+# in the request as `input_content`. If set, `input_content` must be unset.
 # @!attribute [rw] input_content
 # @return [::String]
 # The video data bytes.
-# If unset, the input video(s) should be specified via `input_uri`.
-# If set, `input_uri`
+# If unset, the input video(s) should be specified via the `input_uri`.
+# If set, `input_uri` must be unset.
 # @!attribute [rw] features
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::Feature>]
 # Required. Requested video annotation features.
@@ -48,16 +49,18 @@ module Google
 # @!attribute [rw] output_uri
 # @return [::String]
 # Optional. Location where the output (in JSON format) should be stored.
-# Currently, only [
-# URIs are supported
+# Currently, only [Cloud Storage](https://cloud.google.com/storage/)
+# URIs are supported. These must be specified in the following format:
 # `gs://bucket-id/object-id` (other URI formats return
-# [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
-# [Request
+# [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
+# more information, see [Request
+# URIs](https://cloud.google.com/storage/docs/request-endpoints).
 # @!attribute [rw] location_id
 # @return [::String]
 # Optional. Cloud region where annotation should take place. Supported cloud
-# regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
-# is specified,
+# regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
+# region is specified, the region will be determined based on video file
+# location.
 class AnnotateVideoRequest
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -87,6 +90,9 @@ module Google
 # @!attribute [rw] text_detection_config
 # @return [::Google::Cloud::VideoIntelligence::V1::TextDetectionConfig]
 # Config for TEXT_DETECTION.
+# @!attribute [rw] person_detection_config
+# @return [::Google::Cloud::VideoIntelligence::V1::PersonDetectionConfig]
+# Config for PERSON_DETECTION.
 # @!attribute [rw] object_tracking_config
 # @return [::Google::Cloud::VideoIntelligence::V1::ObjectTrackingConfig]
 # Config for OBJECT_TRACKING.
@@ -103,9 +109,9 @@ module Google
 # If unspecified, defaults to `SHOT_MODE`.
 # @!attribute [rw] stationary_camera
 # @return [::Boolean]
-# Whether the video has been shot from a stationary (i.e
-# When set to true, might improve detection accuracy for moving
-# Should be used with `SHOT_AND_FRAME_MODE` enabled.
+# Whether the video has been shot from a stationary (i.e., non-moving)
+# camera. When set to true, might improve detection accuracy for moving
+# objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
 # @!attribute [rw] model
 # @return [::String]
 # Model to use for label detection.
@@ -117,15 +123,15 @@ module Google
 # frame-level detection. If not set, it is set to 0.4 by default. The valid
 # range for this threshold is [0.1, 0.9]. Any value set outside of this
 # range will be clipped.
-# Note:
+# Note: For best results, follow the default threshold. We will update
 # the default threshold everytime when we release a new model.
 # @!attribute [rw] video_confidence_threshold
 # @return [::Float]
 # The confidence threshold we perform filtering on the labels from
-# video-level and shot-level detections. If not set, it
+# video-level and shot-level detections. If not set, it's set to 0.3 by
 # default. The valid range for this threshold is [0.1, 0.9]. Any value set
 # outside of this range will be clipped.
-# Note:
+# Note: For best results, follow the default threshold. We will update
 # the default threshold everytime when we release a new model.
 class LabelDetectionConfig
 include ::Google::Protobuf::MessageExts
@@ -162,12 +168,36 @@ module Google
 # "builtin/latest".
 # @!attribute [rw] include_bounding_boxes
 # @return [::Boolean]
-# Whether bounding boxes
+# Whether bounding boxes are included in the face annotation output.
+# @!attribute [rw] include_attributes
+# @return [::Boolean]
+# Whether to enable face attributes detection, such as glasses, dark_glasses,
+# mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
 class FaceDetectionConfig
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
 end
 
+# Config for PERSON_DETECTION.
+# @!attribute [rw] include_bounding_boxes
+# @return [::Boolean]
+# Whether bounding boxes are included in the person detection annotation
+# output.
+# @!attribute [rw] include_pose_landmarks
+# @return [::Boolean]
+# Whether to enable pose landmarks detection. Ignored if
+# 'include_bounding_boxes' is set to false.
+# @!attribute [rw] include_attributes
+# @return [::Boolean]
+# Whether to enable person attributes detection, such as cloth color (black,
+# blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+# etc.
+# Ignored if 'include_bounding_boxes' is set to false.
+class PersonDetectionConfig
+include ::Google::Protobuf::MessageExts
+extend ::Google::Protobuf::MessageExts::ClassMethods
+end
+
 # Config for EXPLICIT_CONTENT_DETECTION.
 # @!attribute [rw] model
 # @return [::String]
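PERSON_DETECTION is requested like any other feature, with its knobs supplied through `video_context`. A hedged sketch built from the fields documented above; the input URI is a placeholder:

```ruby
require "google/cloud/video_intelligence/v1"

client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new

operation = client.annotate_video(
  input_uri: "gs://my-bucket/people.mp4",   # placeholder URI
  features:  [:PERSON_DETECTION],
  video_context: {
    person_detection_config: {
      include_bounding_boxes: true,
      include_pose_landmarks: true,   # ignored if bounding boxes are off
      include_attributes:     true
    }
  }
)

operation.wait_until_done!
results = operation.response.annotation_results.first
results.person_detection_annotations.each do |annotation|
  puts "person tracks: #{annotation.tracks.size} (feature version #{annotation.version})"
end
```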
@@ -244,7 +274,7 @@ module Google
 # API](https://developers.google.com/knowledge-graph/).
 # @!attribute [rw] description
 # @return [::String]
-# Textual description, e.g
+# Textual description, e.g., `Fixed-gear bicycle`.
 # @!attribute [rw] language_code
 # @return [::String]
 # Language code for `description` in BCP-47 format.
@@ -260,15 +290,18 @@ module Google
 # @!attribute [rw] category_entities
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::Entity>]
 # Common categories for the detected entity.
-#
-# cases there might be more than one categories e.g
-# a `pet`.
+# For example, when the label is `Terrier`, the category is likely `dog`. And
+# in some cases there might be more than one categories e.g., `Terrier` could
+# also be a `pet`.
 # @!attribute [rw] segments
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelSegment>]
 # All video segments where a label was detected.
 # @!attribute [rw] frames
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelFrame>]
 # All video frames where a label was detected.
+# @!attribute [rw] version
+# @return [::String]
+# Feature version.
 class LabelAnnotation
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -293,6 +326,9 @@ module Google
 # @!attribute [rw] frames
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::ExplicitContentFrame>]
 # All video frames where explicit content was detected.
+# @!attribute [rw] version
+# @return [::String]
+# Feature version.
 class ExplicitContentAnnotation
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -318,6 +354,27 @@ module Google
 extend ::Google::Protobuf::MessageExts::ClassMethods
 end
 
+# Face detection annotation.
+# @!attribute [rw] version
+# @return [::String]
+# Feature version.
+class FaceDetectionAnnotation
+include ::Google::Protobuf::MessageExts
+extend ::Google::Protobuf::MessageExts::ClassMethods
+end
+
+# Person detection annotation per video.
+# @!attribute [rw] tracks
+# @return [::Array<::Google::Cloud::VideoIntelligence::V1::Track>]
+# The detected tracks of a person.
+# @!attribute [rw] version
+# @return [::String]
+# Feature version.
+class PersonDetectionAnnotation
+include ::Google::Protobuf::MessageExts
+extend ::Google::Protobuf::MessageExts::ClassMethods
+end
+
 # Video segment level annotation results for face detection.
 # @!attribute [rw] segment
 # @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
@@ -327,7 +384,7 @@ module Google
 extend ::Google::Protobuf::MessageExts::ClassMethods
 end
 
-#
+# Deprecated. No effect.
 # @!attribute [rw] normalized_bounding_boxes
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox>]
 # Normalized Bounding boxes in a frame.
@@ -342,7 +399,7 @@ module Google
 extend ::Google::Protobuf::MessageExts::ClassMethods
 end
 
-#
+# Deprecated. No effect.
 # @!attribute [rw] thumbnail
 # @return [::String]
 # Thumbnail of a representative face view (in JPEG format).
@@ -399,7 +456,7 @@ module Google
 # A generic detected attribute represented by name in string format.
 # @!attribute [rw] name
 # @return [::String]
-# The name of the attribute,
+# The name of the attribute, for example, glasses, dark_glasses, mouth_open.
 # A full list of supported type names will be provided in the document.
 # @!attribute [rw] confidence
 # @return [::Float]
@@ -417,7 +474,7 @@ module Google
 # location.
 # @!attribute [rw] name
 # @return [::String]
-# The name of this landmark,
+# The name of this landmark, for example, left_hand, right_shoulder.
 # @!attribute [rw] point
 # @return [::Google::Cloud::VideoIntelligence::V1::NormalizedVertex]
 # The 2D point of the detected landmark using the normalized image
@@ -434,17 +491,17 @@ module Google
 # @!attribute [rw] input_uri
 # @return [::String]
 # Video file location in
-# [
+# [Cloud Storage](https://cloud.google.com/storage/).
 # @!attribute [rw] segment
 # @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
 # Video segment on which the annotation is run.
 # @!attribute [rw] segment_label_annotations
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
-# Topical label annotations on video level or user
+# Topical label annotations on video level or user-specified segment level.
 # There is exactly one element for each unique label.
 # @!attribute [rw] segment_presence_label_annotations
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
-# Presence label annotations on video level or user
+# Presence label annotations on video level or user-specified segment level.
 # There is exactly one element for each unique label. Compared to the
 # existing topical `segment_label_annotations`, this field presents more
 # fine-grained, segment-level labels detected in video content and is made
@@ -467,7 +524,10 @@ module Google
 # There is exactly one element for each unique label.
 # @!attribute [rw] face_annotations
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::FaceAnnotation>]
-#
+# Deprecated. Please use `face_detection_annotations` instead.
+# @!attribute [rw] face_detection_annotations
+# @return [::Array<::Google::Cloud::VideoIntelligence::V1::FaceDetectionAnnotation>]
+# Face detection annotations.
 # @!attribute [rw] shot_annotations
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::VideoSegment>]
 # Shot annotations. Each shot is represented as a video segment.
@@ -488,6 +548,9 @@ module Google
 # @!attribute [rw] logo_recognition_annotations
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LogoRecognitionAnnotation>]
 # Annotations for list of logos detected, tracked and recognized in video.
+# @!attribute [rw] person_detection_annotations
+# @return [::Array<::Google::Cloud::VideoIntelligence::V1::PersonDetectionAnnotation>]
+# Person detection annotations.
 # @!attribute [rw] error
 # @return [::Google::Rpc::Status]
 # If set, indicates an error. Note that for a single `AnnotateVideoRequest`
@@ -512,7 +575,7 @@ module Google
 # @!attribute [rw] input_uri
 # @return [::String]
 # Video file location in
-# [
+# [Cloud Storage](https://cloud.google.com/storage/).
 # @!attribute [rw] progress_percent
 # @return [::Integer]
 # Approximate percentage processed thus far. Guaranteed to be
@@ -526,11 +589,11 @@ module Google
 # @!attribute [rw] feature
 # @return [::Google::Cloud::VideoIntelligence::V1::Feature]
 # Specifies which feature is being tracked if the request contains more than
-# one
+# one feature.
 # @!attribute [rw] segment
 # @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
 # Specifies which segment is being tracked if the request contains more than
-# one
+# one segment.
 class VideoAnnotationProgress
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -589,14 +652,14 @@ module Google
 # the top alternative of the recognition result using a speaker_tag provided
 # in the WordInfo.
 # Note: When this is true, we send all the words from the beginning of the
-# audio for the top alternative in every consecutive
+# audio for the top alternative in every consecutive response.
 # This is done in order to improve our speaker tags as our models learn to
 # identify the speakers in the conversation over time.
 # @!attribute [rw] diarization_speaker_count
 # @return [::Integer]
-# Optional. If set, specifies the estimated number of speakers in the
-# If not set, defaults to '2'.
-#
+# Optional. If set, specifies the estimated number of speakers in the
+# conversation. If not set, defaults to '2'. Ignored unless
+# enable_speaker_diarization is set to true.
 # @!attribute [rw] enable_word_confidence
 # @return [::Boolean]
 # Optional. If `true`, the top result includes a list of words and the
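These fields belong to `SpeechTranscriptionConfig`. A hedged sketch of a SPEECH_TRANSCRIPTION request that turns diarization on; the URI is a placeholder:

```ruby
require "google/cloud/video_intelligence/v1"

client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new

operation = client.annotate_video(
  input_uri: "gs://my-bucket/interview.mp4",   # placeholder URI
  features:  [:SPEECH_TRANSCRIPTION],
  video_context: {
    speech_transcription_config: {
      language_code:              "en-US",
      enable_speaker_diarization: true,
      diarization_speaker_count:  2    # ignored unless diarization is enabled
    }
  }
)
```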
@@ -631,9 +694,9 @@ module Google
 # ranked by the recognizer.
 # @!attribute [r] language_code
 # @return [::String]
-# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
-# the language in this result. This language code was
-# most likelihood of being spoken in the audio.
+# Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
+# language tag of the language in this result. This language code was
+# detected to have the most likelihood of being spoken in the audio.
 class SpeechTranscription
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -654,8 +717,8 @@ module Google
 # @!attribute [r] words
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::WordInfo>]
 # Output only. A list of word-specific information for each recognized word.
-# Note: When `enable_speaker_diarization` is true, you will see all
-# from the beginning of the audio.
+# Note: When `enable_speaker_diarization` is set to true, you will see all
+# the words from the beginning of the audio.
 class SpeechRecognitionAlternative
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -776,6 +839,9 @@ module Google
 # @!attribute [rw] segments
 # @return [::Array<::Google::Cloud::VideoIntelligence::V1::TextSegment>]
 # All video segments where OCR detected text appears.
+# @!attribute [rw] version
+# @return [::String]
+# Feature version.
 class TextAnnotation
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -819,6 +885,9 @@ module Google
 # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
 # messages in frames.
 # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+# @!attribute [rw] version
+# @return [::String]
+# Feature version.
 class ObjectTrackingAnnotation
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -856,7 +925,7 @@ module Google
 # Explicit content detection.
 EXPLICIT_CONTENT_DETECTION = 3
 
-# Human face detection
+# Human face detection.
 FACE_DETECTION = 4
 
 # Speech transcription.
@@ -870,6 +939,9 @@ module Google
 
 # Logo detection, tracking, and recognition.
 LOGO_RECOGNITION = 12
+
+# Person detection.
+PERSON_DETECTION = 14
 end
 
 # Label detection mode.
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: google-cloud-video_intelligence-v1
 version: !ruby/object:Gem::Version
-version: 0.2.1
+version: 0.3.0
 platform: ruby
 authors:
 - Google LLC
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2020-
+date: 2020-09-30 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
 name: gapic-common
@@ -16,14 +16,14 @@ dependencies:
 requirements:
 - - "~>"
 - !ruby/object:Gem::Version
-version: '0.
+version: '0.3'
 type: :runtime
 prerelease: false
 version_requirements: !ruby/object:Gem::Requirement
 requirements:
 - - "~>"
 - !ruby/object:Gem::Version
-version: '0.
+version: '0.3'
 - !ruby/object:Gem::Dependency
 name: google-cloud-errors
 requirement: !ruby/object:Gem::Requirement
@@ -200,7 +200,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
 version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.1.4
 signing_key:
 specification_version: 4
 summary: API Client library for the Cloud Video Intelligence V1 API