google-cloud-video_intelligence-v1p3beta1 0.3.0 → 0.4.0
- checksums.yaml +4 -4
- data/README.md +2 -2
- data/lib/google/cloud/video_intelligence/v1p3beta1/streaming_video_intelligence_service/client.rb +2 -2
- data/lib/google/cloud/video_intelligence/v1p3beta1/version.rb +1 -1
- data/lib/google/cloud/video_intelligence/v1p3beta1/video_intelligence_service/client.rb +2 -2
- data/lib/google/cloud/video_intelligence/v1p3beta1/video_intelligence_service/operations.rb +2 -2
- data/lib/google/cloud/video_intelligence/v1p3beta1/video_intelligence_service/rest/client.rb +2 -2
- data/lib/google/cloud/video_intelligence/v1p3beta1/video_intelligence_service/rest/operations.rb +2 -2
- data/lib/google/cloud/videointelligence/v1p3beta1/video_intelligence_pb.rb +27 -321
- data/proto_docs/google/api/client.rb +67 -4
- data/proto_docs/google/protobuf/any.rb +7 -4
- data/proto_docs/google/protobuf/timestamp.rb +1 -3
- metadata +7 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-metadata.gz:
-data.tar.gz:
+metadata.gz: de6e60306b62ee2e1ed3a8b1cbc06a8b3e0ec3f6ef343eacc53eb5247f761a94
+data.tar.gz: ce675b95da6ab583e6eaa0436ada996bf76d315b3a25a5ab986214d80c823ea9
 SHA512:
-metadata.gz:
-data.tar.gz:
+metadata.gz: b1059d59098561c7c9f0147744665ae14452e15a63e669f78423417920252596097bea47745453a5e568c565e5966472b965c4c0802eb5c0d007df13bb3f3822
+data.tar.gz: 9e0513ed7b478644ea8389db8c16f91796ee02b9c52598b6c8befde657c34076f687093692af7595ce24adf479e146221210ee0dfed1dc460f6fb3a81f8e2570

data/README.md
CHANGED
@@ -1,6 +1,6 @@
 # Ruby Client for the Cloud Video Intelligence V1p3beta1 API
 
-
+Detects objects, explicit content, and scene changes in videos. It also specifies the region for annotation and transcribes speech to text. Supports both asynchronous API and streaming API.
 
 Detects objects, explicit content, and scene changes in videos. It also specifies the region for annotation and transcribes speech to text. Supports both asynchronous API and streaming API.
 
@@ -47,7 +47,7 @@ for general usage information.
 
 To enable logging for this library, set the logger for the underlying [gRPC](https://github.com/grpc/grpc/tree/master/src/ruby) library.
 The logger that you set may be a Ruby stdlib [`Logger`](https://ruby-doc.org/current/stdlibs/logger/Logger.html) as shown below,
-or a [`Google::Cloud::Logging::Logger`](https://
+or a [`Google::Cloud::Logging::Logger`](https://cloud.google.com/ruby/docs/reference/google-cloud-logging/latest)
 that will write logs to [Cloud Logging](https://cloud.google.com/logging/). See [grpc/logconfig.rb](https://github.com/grpc/grpc/blob/master/src/ruby/lib/grpc/logconfig.rb)
 and the gRPC [spec_helper.rb](https://github.com/grpc/grpc/blob/master/src/ruby/spec/spec_helper.rb) for additional information.
 
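
The logging note in the README hunk above refers to the standard gRPC logger hook; a minimal sketch of that setup (the module name MyLogger and the $stderr destination are illustrative, not taken from this diff):

    require "logger"

    # Define a module that answers #logger, then graft it onto GRPC so the
    # underlying gRPC library writes through this stdlib Logger.
    module MyLogger
      LOGGER = Logger.new $stderr, level: Logger::WARN
      def logger
        LOGGER
      end
    end

    module GRPC
      extend MyLogger
    end
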
data/lib/google/cloud/video_intelligence/v1p3beta1/streaming_video_intelligence_service/client.rb
CHANGED
@@ -265,9 +265,9 @@ module Google
 # * (`String`) The path to a service account key file in JSON format
 # * (`Hash`) A service account key as a Hash
 # * (`Google::Auth::Credentials`) A googleauth credentials object
-# (see the [googleauth docs](https://
+# (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
 # * (`Signet::OAuth2::Client`) A signet oauth2 client object
-# (see the [signet docs](https://
+# (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
 # * (`GRPC::Core::Channel`) a gRPC channel with included credentials
 # * (`GRPC::Core::ChannelCredentials`) a gRPC credentails object
 # * (`nil`) indicating no credentials
@@ -317,9 +317,9 @@ module Google
 # * (`String`) The path to a service account key file in JSON format
 # * (`Hash`) A service account key as a Hash
 # * (`Google::Auth::Credentials`) A googleauth credentials object
-# (see the [googleauth docs](https://
+# (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
 # * (`Signet::OAuth2::Client`) A signet oauth2 client object
-# (see the [signet docs](https://
+# (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
 # * (`GRPC::Core::Channel`) a gRPC channel with included credentials
 # * (`GRPC::Core::ChannelCredentials`) a gRPC credentails object
 # * (`nil`) indicating no credentials
@@ -620,9 +620,9 @@ module Google
 # * (`String`) The path to a service account key file in JSON format
 # * (`Hash`) A service account key as a Hash
 # * (`Google::Auth::Credentials`) A googleauth credentials object
-# (see the [googleauth docs](https://
+# (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
 # * (`Signet::OAuth2::Client`) A signet oauth2 client object
-# (see the [signet docs](https://
+# (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
 # * (`GRPC::Core::Channel`) a gRPC channel with included credentials
 # * (`GRPC::Core::ChannelCredentials`) a gRPC credentails object
 # * (`nil`) indicating no credentials
 
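
For orientation, the credential types documented in the hunks above are supplied when constructing a client; a minimal sketch against the classes in this gem (the key file path is illustrative):

    require "google/cloud/video_intelligence/v1p3beta1"

    # Any of the documented credential types can be assigned here: a keyfile path,
    # a Hash, a Google::Auth::Credentials, a Signet::OAuth2::Client, a gRPC
    # channel/credentials object, or nil.
    client = ::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingVideoIntelligenceService::Client.new do |config|
      config.credentials = "/path/to/service-account-keyfile.json"
    end
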
data/lib/google/cloud/video_intelligence/v1p3beta1/video_intelligence_service/rest/client.rb
CHANGED
@@ -286,9 +286,9 @@ module Google
 # * (`String`) The path to a service account key file in JSON format
 # * (`Hash`) A service account key as a Hash
 # * (`Google::Auth::Credentials`) A googleauth credentials object
-# (see the [googleauth docs](https://
+# (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
 # * (`Signet::OAuth2::Client`) A signet oauth2 client object
-# (see the [signet docs](https://
+# (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
 # * (`nil`) indicating no credentials
 # @return [::Object]
 # @!attribute [rw] scope
 
data/lib/google/cloud/video_intelligence/v1p3beta1/video_intelligence_service/rest/operations.rb
CHANGED
@@ -411,9 +411,9 @@ module Google
 # * (`String`) The path to a service account key file in JSON format
 # * (`Hash`) A service account key as a Hash
 # * (`Google::Auth::Credentials`) A googleauth credentials object
-# (see the [googleauth docs](https://
+# (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
 # * (`Signet::OAuth2::Client`) A signet oauth2 client object
-# (see the [signet docs](https://
+# (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
 # * (`nil`) indicating no credentials
 # @return [::Object]
 # @!attribute [rw] scope
 
data/lib/google/cloud/videointelligence/v1p3beta1/video_intelligence_pb.rb
CHANGED
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 # Generated by the protocol buffer compiler. DO NOT EDIT!
 # source: google/cloud/videointelligence/v1p3beta1/video_intelligence.proto
 
@@ -11,329 +12,34 @@ require 'google/protobuf/duration_pb'
 require 'google/protobuf/timestamp_pb'
 require 'google/rpc/status_pb'
 
-
-add_file("google/cloud/videointelligence/v1p3beta1/video_intelligence.proto", :syntax => :proto3) do
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-optional :model, :string, 3
-optional :frame_confidence_threshold, :float, 4
-optional :video_confidence_threshold, :float, 5
-end
-add_message "google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig" do
-optional :model, :string, 1
-end
-add_message "google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig" do
-optional :model, :string, 1
-end
-add_message "google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig" do
-optional :model, :string, 1
-end
-add_message "google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig" do
-optional :model, :string, 1
-optional :include_bounding_boxes, :bool, 2
-optional :include_attributes, :bool, 5
-end
-add_message "google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig" do
-optional :include_bounding_boxes, :bool, 1
-optional :include_pose_landmarks, :bool, 2
-optional :include_attributes, :bool, 3
-end
-add_message "google.cloud.videointelligence.v1p3beta1.TextDetectionConfig" do
-repeated :language_hints, :string, 1
-optional :model, :string, 2
-end
-add_message "google.cloud.videointelligence.v1p3beta1.VideoSegment" do
-optional :start_time_offset, :message, 1, "google.protobuf.Duration"
-optional :end_time_offset, :message, 2, "google.protobuf.Duration"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.LabelSegment" do
-optional :segment, :message, 1, "google.cloud.videointelligence.v1p3beta1.VideoSegment"
-optional :confidence, :float, 2
-end
-add_message "google.cloud.videointelligence.v1p3beta1.LabelFrame" do
-optional :time_offset, :message, 1, "google.protobuf.Duration"
-optional :confidence, :float, 2
-end
-add_message "google.cloud.videointelligence.v1p3beta1.Entity" do
-optional :entity_id, :string, 1
-optional :description, :string, 2
-optional :language_code, :string, 3
-end
-add_message "google.cloud.videointelligence.v1p3beta1.LabelAnnotation" do
-optional :entity, :message, 1, "google.cloud.videointelligence.v1p3beta1.Entity"
-repeated :category_entities, :message, 2, "google.cloud.videointelligence.v1p3beta1.Entity"
-repeated :segments, :message, 3, "google.cloud.videointelligence.v1p3beta1.LabelSegment"
-repeated :frames, :message, 4, "google.cloud.videointelligence.v1p3beta1.LabelFrame"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame" do
-optional :time_offset, :message, 1, "google.protobuf.Duration"
-optional :pornography_likelihood, :enum, 2, "google.cloud.videointelligence.v1p3beta1.Likelihood"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation" do
-repeated :frames, :message, 1, "google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox" do
-optional :left, :float, 1
-optional :top, :float, 2
-optional :right, :float, 3
-optional :bottom, :float, 4
-end
-add_message "google.cloud.videointelligence.v1p3beta1.TimestampedObject" do
-optional :normalized_bounding_box, :message, 1, "google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox"
-optional :time_offset, :message, 2, "google.protobuf.Duration"
-repeated :attributes, :message, 3, "google.cloud.videointelligence.v1p3beta1.DetectedAttribute"
-repeated :landmarks, :message, 4, "google.cloud.videointelligence.v1p3beta1.DetectedLandmark"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.Track" do
-optional :segment, :message, 1, "google.cloud.videointelligence.v1p3beta1.VideoSegment"
-repeated :timestamped_objects, :message, 2, "google.cloud.videointelligence.v1p3beta1.TimestampedObject"
-repeated :attributes, :message, 3, "google.cloud.videointelligence.v1p3beta1.DetectedAttribute"
-optional :confidence, :float, 4
-end
-add_message "google.cloud.videointelligence.v1p3beta1.DetectedAttribute" do
-optional :name, :string, 1
-optional :confidence, :float, 2
-optional :value, :string, 3
-end
-add_message "google.cloud.videointelligence.v1p3beta1.Celebrity" do
-optional :name, :string, 1
-optional :display_name, :string, 2
-optional :description, :string, 3
-end
-add_message "google.cloud.videointelligence.v1p3beta1.CelebrityTrack" do
-repeated :celebrities, :message, 1, "google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity"
-optional :face_track, :message, 3, "google.cloud.videointelligence.v1p3beta1.Track"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity" do
-optional :celebrity, :message, 1, "google.cloud.videointelligence.v1p3beta1.Celebrity"
-optional :confidence, :float, 2
-end
-add_message "google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation" do
-repeated :celebrity_tracks, :message, 1, "google.cloud.videointelligence.v1p3beta1.CelebrityTrack"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.DetectedLandmark" do
-optional :name, :string, 1
-optional :point, :message, 2, "google.cloud.videointelligence.v1p3beta1.NormalizedVertex"
-optional :confidence, :float, 3
-end
-add_message "google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation" do
-repeated :tracks, :message, 3, "google.cloud.videointelligence.v1p3beta1.Track"
-optional :thumbnail, :bytes, 4
-end
-add_message "google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation" do
-repeated :tracks, :message, 1, "google.cloud.videointelligence.v1p3beta1.Track"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults" do
-optional :input_uri, :string, 1
-optional :segment, :message, 10, "google.cloud.videointelligence.v1p3beta1.VideoSegment"
-repeated :segment_label_annotations, :message, 2, "google.cloud.videointelligence.v1p3beta1.LabelAnnotation"
-repeated :segment_presence_label_annotations, :message, 23, "google.cloud.videointelligence.v1p3beta1.LabelAnnotation"
-repeated :shot_label_annotations, :message, 3, "google.cloud.videointelligence.v1p3beta1.LabelAnnotation"
-repeated :shot_presence_label_annotations, :message, 24, "google.cloud.videointelligence.v1p3beta1.LabelAnnotation"
-repeated :frame_label_annotations, :message, 4, "google.cloud.videointelligence.v1p3beta1.LabelAnnotation"
-repeated :face_detection_annotations, :message, 13, "google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation"
-repeated :shot_annotations, :message, 6, "google.cloud.videointelligence.v1p3beta1.VideoSegment"
-optional :explicit_annotation, :message, 7, "google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation"
-repeated :speech_transcriptions, :message, 11, "google.cloud.videointelligence.v1p3beta1.SpeechTranscription"
-repeated :text_annotations, :message, 12, "google.cloud.videointelligence.v1p3beta1.TextAnnotation"
-repeated :object_annotations, :message, 14, "google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation"
-repeated :logo_recognition_annotations, :message, 19, "google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation"
-repeated :person_detection_annotations, :message, 20, "google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation"
-optional :celebrity_recognition_annotations, :message, 21, "google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation"
-optional :error, :message, 9, "google.rpc.Status"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse" do
-repeated :annotation_results, :message, 1, "google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress" do
-optional :input_uri, :string, 1
-optional :progress_percent, :int32, 2
-optional :start_time, :message, 3, "google.protobuf.Timestamp"
-optional :update_time, :message, 4, "google.protobuf.Timestamp"
-optional :feature, :enum, 5, "google.cloud.videointelligence.v1p3beta1.Feature"
-optional :segment, :message, 6, "google.cloud.videointelligence.v1p3beta1.VideoSegment"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress" do
-repeated :annotation_progress, :message, 1, "google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig" do
-optional :language_code, :string, 1
-optional :max_alternatives, :int32, 2
-optional :filter_profanity, :bool, 3
-repeated :speech_contexts, :message, 4, "google.cloud.videointelligence.v1p3beta1.SpeechContext"
-optional :enable_automatic_punctuation, :bool, 5
-repeated :audio_tracks, :int32, 6
-optional :enable_speaker_diarization, :bool, 7
-optional :diarization_speaker_count, :int32, 8
-optional :enable_word_confidence, :bool, 9
-end
-add_message "google.cloud.videointelligence.v1p3beta1.SpeechContext" do
-repeated :phrases, :string, 1
-end
-add_message "google.cloud.videointelligence.v1p3beta1.SpeechTranscription" do
-repeated :alternatives, :message, 1, "google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative"
-optional :language_code, :string, 2
-end
-add_message "google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative" do
-optional :transcript, :string, 1
-optional :confidence, :float, 2
-repeated :words, :message, 3, "google.cloud.videointelligence.v1p3beta1.WordInfo"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.WordInfo" do
-optional :start_time, :message, 1, "google.protobuf.Duration"
-optional :end_time, :message, 2, "google.protobuf.Duration"
-optional :word, :string, 3
-optional :confidence, :float, 4
-optional :speaker_tag, :int32, 5
-end
-add_message "google.cloud.videointelligence.v1p3beta1.NormalizedVertex" do
-optional :x, :float, 1
-optional :y, :float, 2
-end
-add_message "google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly" do
-repeated :vertices, :message, 1, "google.cloud.videointelligence.v1p3beta1.NormalizedVertex"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.TextSegment" do
-optional :segment, :message, 1, "google.cloud.videointelligence.v1p3beta1.VideoSegment"
-optional :confidence, :float, 2
-repeated :frames, :message, 3, "google.cloud.videointelligence.v1p3beta1.TextFrame"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.TextFrame" do
-optional :rotated_bounding_box, :message, 1, "google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly"
-optional :time_offset, :message, 2, "google.protobuf.Duration"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.TextAnnotation" do
-optional :text, :string, 1
-repeated :segments, :message, 2, "google.cloud.videointelligence.v1p3beta1.TextSegment"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame" do
-optional :normalized_bounding_box, :message, 1, "google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox"
-optional :time_offset, :message, 2, "google.protobuf.Duration"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation" do
-optional :entity, :message, 1, "google.cloud.videointelligence.v1p3beta1.Entity"
-optional :confidence, :float, 4
-repeated :frames, :message, 2, "google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame"
-oneof :track_info do
-optional :segment, :message, 3, "google.cloud.videointelligence.v1p3beta1.VideoSegment"
-optional :track_id, :int64, 5
-end
-end
-add_message "google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation" do
-optional :entity, :message, 1, "google.cloud.videointelligence.v1p3beta1.Entity"
-repeated :tracks, :message, 2, "google.cloud.videointelligence.v1p3beta1.Track"
-repeated :segments, :message, 3, "google.cloud.videointelligence.v1p3beta1.VideoSegment"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest" do
-oneof :streaming_request do
-optional :video_config, :message, 1, "google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig"
-optional :input_content, :bytes, 2
-end
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig" do
-optional :feature, :enum, 1, "google.cloud.videointelligence.v1p3beta1.StreamingFeature"
-optional :storage_config, :message, 30, "google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig"
-oneof :streaming_config do
-optional :shot_change_detection_config, :message, 2, "google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig"
-optional :label_detection_config, :message, 3, "google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig"
-optional :explicit_content_detection_config, :message, 4, "google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig"
-optional :object_tracking_config, :message, 5, "google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfig"
-optional :automl_action_recognition_config, :message, 23, "google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig"
-optional :automl_classification_config, :message, 21, "google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig"
-optional :automl_object_tracking_config, :message, 22, "google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig"
-end
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse" do
-optional :error, :message, 1, "google.rpc.Status"
-optional :annotation_results, :message, 2, "google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults"
-optional :annotation_results_uri, :string, 3
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults" do
-repeated :shot_annotations, :message, 1, "google.cloud.videointelligence.v1p3beta1.VideoSegment"
-repeated :label_annotations, :message, 2, "google.cloud.videointelligence.v1p3beta1.LabelAnnotation"
-optional :explicit_annotation, :message, 3, "google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation"
-repeated :object_annotations, :message, 4, "google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation"
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig" do
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig" do
-optional :stationary_camera, :bool, 1
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig" do
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfig" do
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfig" do
-optional :model_name, :string, 1
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfig" do
-optional :model_name, :string, 1
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfig" do
-optional :model_name, :string, 1
-end
-add_message "google.cloud.videointelligence.v1p3beta1.StreamingStorageConfig" do
-optional :enable_storage_annotation_result, :bool, 1
-optional :annotation_result_storage_directory, :string, 3
-end
-add_enum "google.cloud.videointelligence.v1p3beta1.LabelDetectionMode" do
-value :LABEL_DETECTION_MODE_UNSPECIFIED, 0
-value :SHOT_MODE, 1
-value :FRAME_MODE, 2
-value :SHOT_AND_FRAME_MODE, 3
-end
-add_enum "google.cloud.videointelligence.v1p3beta1.Likelihood" do
-value :LIKELIHOOD_UNSPECIFIED, 0
-value :VERY_UNLIKELY, 1
-value :UNLIKELY, 2
-value :POSSIBLE, 3
-value :LIKELY, 4
-value :VERY_LIKELY, 5
-end
-add_enum "google.cloud.videointelligence.v1p3beta1.StreamingFeature" do
-value :STREAMING_FEATURE_UNSPECIFIED, 0
-value :STREAMING_LABEL_DETECTION, 1
-value :STREAMING_SHOT_CHANGE_DETECTION, 2
-value :STREAMING_EXPLICIT_CONTENT_DETECTION, 3
-value :STREAMING_OBJECT_TRACKING, 4
-value :STREAMING_AUTOML_ACTION_RECOGNITION, 23
-value :STREAMING_AUTOML_CLASSIFICATION, 21
-value :STREAMING_AUTOML_OBJECT_TRACKING, 22
-end
-add_enum "google.cloud.videointelligence.v1p3beta1.Feature" do
-value :FEATURE_UNSPECIFIED, 0
-value :LABEL_DETECTION, 1
-value :SHOT_CHANGE_DETECTION, 2
-value :EXPLICIT_CONTENT_DETECTION, 3
-value :FACE_DETECTION, 4
-value :SPEECH_TRANSCRIPTION, 6
-value :TEXT_DETECTION, 7
-value :OBJECT_TRACKING, 9
-value :LOGO_RECOGNITION, 12
-value :CELEBRITY_RECOGNITION, 13
-value :PERSON_DETECTION, 14
+
descriptor_data = "\nAgoogle/cloud/videointelligence/v1p3beta1/video_intelligence.proto\x12(google.cloud.videointelligence.v1p3beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01\"\x80\x07\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig\x12\\\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig\x12`\n\x17person_detection_config\x18\x0b \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig\x12^\n\x16object_tracking_config\x18\r \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig\"\xe4\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p3beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12\"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12\"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02\"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t\"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\"`\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x05 \x01(\x08\"s\n\x15PersonDetectionConfig\x12\x1e\n\x16include_bounding_boxes\x18\x01 \x01(\x08\x12\x1e\n\x16include_pose_landmarks\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x03 \x01(\x08\"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t\"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02\"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 
\x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t\"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p3beta1.LabelFrame\"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p3beta1.Likelihood\"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame\"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02\"\xcf\x02\n\x11TimestampedObject\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12R\n\tlandmarks\x18\x04 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.DetectedLandmarkB\x03\xe0\x41\x01\"\x99\x02\n\x05Track\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12X\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.TimestampedObject\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01\"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t\"D\n\tCelebrity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"\xab\x02\n\x0e\x43\x65lebrityTrack\x12\x61\n\x0b\x63\x65lebrities\x18\x01 \x03(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity\x12\x43\n\nface_track\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x1aq\n\x13RecognizedCelebrity\x12\x46\n\tcelebrity\x18\x01 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.Celebrity\x12\x12\n\nconfidence\x18\x02 \x01(\x02\"t\n\x1e\x43\x65lebrityRecognitionAnnotation\x12R\n\x10\x63\x65lebrity_tracks\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.CelebrityTrack\"\x7f\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12I\n\x05point\x18\x02 \x01(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02\"m\n\x17\x46\x61\x63\x65\x44\x65tectionAnnotation\x12?\n\x06tracks\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12\x11\n\tthumbnail\x18\x04 \x01(\x0c\"\\\n\x19PersonDetectionAnnotation\x12?\n\x06tracks\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\"\xef\x0b\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12G\n\x07segment\x18\n \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\\\n\x19segment_label_annotations\x18\x02 
\x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n\"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x62\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n\x1a\x66\x61\x63\x65_detection_annotations\x18\r \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.SpeechTranscription\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation\x12i\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation\x12i\n\x1cperson_detection_annotations\x18\x14 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation\x12s\n!celebrity_recognition_annotations\x18\x15 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status\"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults\"\xb4\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.Feature\x12G\n\x07segment\x18\x06 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress\"\x88\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p3beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01\"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01\"\x8f\x01\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 
\x01(\tB\x03\xe0\x41\x03\"\x93\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x46\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p3beta1.WordInfoB\x03\xe0\x41\x03\"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03\"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex\"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.TextFrame\"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p3beta1.TextSegment\"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xac\x02\n\x18ObjectTrackingAnnotation\x12I\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrameB\x0c\n\ntrack_info\"\xe8\x01\n\x19LogoRecognitionAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12?\n\x06tracks\x18\x02 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\"\xa5\x01\n\x1dStreamingAnnotateVideoRequest\x12V\n\x0cvideo_config\x18\x01 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfigH\x00\x12\x17\n\rinput_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request\"\x8a\x08\n\x14StreamingVideoConfig\x12t\n\x1cshot_change_detection_config\x18\x02 \x01(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfigH\x00\x12i\n\x16label_detection_config\x18\x03 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfigH\x00\x12~\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32Q.google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfigH\x00\x12i\n\x16object_tracking_config\x18\x05 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfigH\x00\x12|\n automl_action_recognition_config\x18\x17 \x01(\x0b\x32P.google.cloud.videointelligence.v1p3beta1.StreamingAutomlActionRecognitionConfigH\x00\x12u\n\x1c\x61utoml_classification_config\x18\x15 
\x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfigH\x00\x12v\n\x1d\x61utoml_object_tracking_config\x18\x16 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfigH\x00\x12K\n\x07\x66\x65\x61ture\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1p3beta1.StreamingFeature\x12X\n\x0estorage_config\x18\x1e \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfigB\x12\n\x10streaming_config\"\xca\x01\n\x1eStreamingAnnotateVideoResponse\x12!\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x65\n\x12\x61nnotation_results\x18\x02 \x01(\x0b\x32I.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults\x12\x1e\n\x16\x61nnotation_results_uri\x18\x03 \x01(\t\"\x8b\x03\n\x1fStreamingVideoAnnotationResults\x12P\n\x10shot_annotations\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12T\n\x11label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12`\n\x13\x65xplicit_annotation\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12^\n\x12object_annotations\x18\x04 \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation\"$\n\"StreamingShotChangeDetectionConfig\":\n\x1dStreamingLabelDetectionConfig\x12\x19\n\x11stationary_camera\x18\x01 \x01(\x08\")\n\'StreamingExplicitContentDetectionConfig\"\x1f\n\x1dStreamingObjectTrackingConfig\"<\n&StreamingAutomlActionRecognitionConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t\"9\n#StreamingAutomlClassificationConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t\"9\n#StreamingAutomlObjectTrackingConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t\"o\n\x16StreamingStorageConfig\x12(\n enable_storage_annotation_result\x18\x01 \x01(\x08\x12+\n#annotation_result_storage_directory\x18\x03 \x01(\t*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05*\xb6\x02\n\x10StreamingFeature\x12!\n\x1dSTREAMING_FEATURE_UNSPECIFIED\x10\x00\x12\x1d\n\x19STREAMING_LABEL_DETECTION\x10\x01\x12#\n\x1fSTREAMING_SHOT_CHANGE_DETECTION\x10\x02\x12(\n$STREAMING_EXPLICIT_CONTENT_DETECTION\x10\x03\x12\x1d\n\x19STREAMING_OBJECT_TRACKING\x10\x04\x12\'\n#STREAMING_AUTOML_ACTION_RECOGNITION\x10\x17\x12#\n\x1fSTREAMING_AUTOML_CLASSIFICATION\x10\x15\x12$\n 
STREAMING_AUTOML_OBJECT_TRACKING\x10\x16*\x90\x02\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x19\n\x15\x43\x45LEBRITY_RECOGNITION\x10\r\x12\x14\n\x10PERSON_DETECTION\x10\x0e\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation\"k\x82\xd3\xe4\x93\x02\x1f\"\x1a/v1p3beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platform2\xad\x02\n!StreamingVideoIntelligenceService\x12\xb1\x01\n\x16StreamingAnnotateVideo\x12G.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest\x1aH.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse\"\x00(\x01\x30\x01\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xaf\x02\n,com.google.cloud.videointelligence.v1p3beta1B\x1dVideoIntelligenceServiceProtoP\x01ZZcloud.google.com/go/videointelligence/apiv1p3beta1/videointelligencepb;videointelligencepb\xaa\x02(Google.Cloud.VideoIntelligence.V1P3Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p3beta1\xea\x02+Google::Cloud::VideoIntelligence::V1p3beta1b\x06proto3"
+
+pool = Google::Protobuf::DescriptorPool.generated_pool
+
+begin
+pool.add_serialized_file(descriptor_data)
+rescue TypeError => e
+# Compatibility code: will be removed in the next major version.
+require 'google/protobuf/descriptor_pb'
+parsed = Google::Protobuf::FileDescriptorProto.decode(descriptor_data)
+parsed.clear_dependency
+serialized = parsed.class.encode(parsed)
+file = pool.add_serialized_file(serialized)
+warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
+imports = [
+["google.protobuf.Duration", "google/protobuf/duration.proto"],
+["google.rpc.Status", "google/rpc/status.proto"],
+["google.protobuf.Timestamp", "google/protobuf/timestamp.proto"],
+]
+imports.each do |type_name, expected_filename|
+import_file = pool.lookup(type_name).file_descriptor
+if import_file.name != expected_filename
+warn "- #{file.name} imports #{expected_filename}, but that import was loaded as #{import_file.name}"
 end
 end
+warn "Each proto file must use a consistent fully-qualified name."
+warn "This will become an error in the next major version."
 end
 
 module Google
 
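
The regenerated _pb.rb above swaps the per-message add_message/add_enum DSL for a single serialized file descriptor registered in the generated descriptor pool; how the generated message classes are used does not change. A minimal sketch (the URI and field values are illustrative, not taken from this diff):

    require "google/cloud/videointelligence/v1p3beta1/video_intelligence_pb"

    # Message classes are still resolved from the generated descriptor pool.
    segment = ::Google::Cloud::VideoIntelligence::V1p3beta1::VideoSegment.new(
      start_time_offset: { seconds: 0 },
      end_time_offset:   { seconds: 30 }
    )
    request = ::Google::Cloud::VideoIntelligence::V1p3beta1::AnnotateVideoRequest.new(
      input_uri:     "gs://my-bucket/my-video.mp4",   # illustrative bucket/object
      features:      [:LABEL_DETECTION, :SHOT_CHANGE_DETECTION],
      video_context: { segments: [segment] }
    )
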
data/proto_docs/google/api/client.rb
CHANGED
@@ -35,7 +35,9 @@ module Google
 # Details about how and where to publish client libraries.
 # @!attribute [rw] version
 # @return [::String]
-# Version of the API to apply these settings to.
+# Version of the API to apply these settings to. This is the full protobuf
+# package for the API, ending in the version element.
+# Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1".
 # @!attribute [rw] launch_stage
 # @return [::Google::Api::LaunchStage]
 # Launch stage of this version of the API.
@@ -81,7 +83,7 @@ module Google
 # long-running operation pattern.
 # @!attribute [rw] new_issue_uri
 # @return [::String]
-# Link to a
+# Link to a *public* URI where users can report issues. Example:
 # https://issuetracker.google.com/issues/new?component=190865&template=1161103
 # @!attribute [rw] documentation_uri
 # @return [::String]
@@ -111,6 +113,10 @@ module Google
 # Client library settings. If the same version string appears multiple
 # times in this list, then the last one wins. Settings from earlier
 # settings with the same version string are discarded.
+# @!attribute [rw] proto_reference_documentation_uri
+# @return [::String]
+# Optional link to proto reference documentation. Example:
+# https://cloud.google.com/pubsub/lite/docs/reference/rpc
 class Publishing
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -203,9 +209,57 @@ module Google
 # @!attribute [rw] common
 # @return [::Google::Api::CommonLanguageSettings]
 # Some settings.
+# @!attribute [rw] renamed_services
+# @return [::Google::Protobuf::Map{::String => ::String}]
+# Map from original service names to renamed versions.
+# This is used when the default generated types
+# would cause a naming conflict. (Neither name is
+# fully-qualified.)
+# Example: Subscriber to SubscriberServiceApi.
+# @!attribute [rw] renamed_resources
+# @return [::Google::Protobuf::Map{::String => ::String}]
+# Map from full resource types to the effective short name
+# for the resource. This is used when otherwise resource
+# named from different services would cause naming collisions.
+# Example entry:
+# "datalabeling.googleapis.com/Dataset": "DataLabelingDataset"
+# @!attribute [rw] ignored_resources
+# @return [::Array<::String>]
+# List of full resource types to ignore during generation.
+# This is typically used for API-specific Location resources,
+# which should be handled by the generator as if they were actually
+# the common Location resources.
+# Example entry: "documentai.googleapis.com/Location"
+# @!attribute [rw] forced_namespace_aliases
+# @return [::Array<::String>]
+# Namespaces which must be aliased in snippets due to
+# a known (but non-generator-predictable) naming collision
+# @!attribute [rw] handwritten_signatures
+# @return [::Array<::String>]
+# Method signatures (in the form "service.method(signature)")
+# which are provided separately, so shouldn't be generated.
+# Snippets *calling* these methods are still generated, however.
 class DotnetSettings
 include ::Google::Protobuf::MessageExts
 extend ::Google::Protobuf::MessageExts::ClassMethods
+
+# @!attribute [rw] key
+# @return [::String]
+# @!attribute [rw] value
+# @return [::String]
+class RenamedServicesEntry
+include ::Google::Protobuf::MessageExts
+extend ::Google::Protobuf::MessageExts::ClassMethods
+end
+
+# @!attribute [rw] key
+# @return [::String]
+# @!attribute [rw] value
+# @return [::String]
+class RenamedResourcesEntry
+include ::Google::Protobuf::MessageExts
+extend ::Google::Protobuf::MessageExts::ClassMethods
+end
 end
 
 # Settings for Ruby client libraries.
@@ -240,8 +294,8 @@ module Google
 # Example of a YAML configuration::
 #
 # publishing:
-#
-# - selector:
+# method_settings:
+# - selector: google.cloud.speech.v2.Speech.BatchRecognize
 # long_running:
 # initial_poll_delay:
 # seconds: 60 # 1 minute
@@ -299,6 +353,15 @@ module Google
 
 # Street View Org.
 STREET_VIEW = 4
+
+# Shopping Org.
+SHOPPING = 5
+
+# Geo Org.
+GEO = 6
+
+# Generative AI - https://developers.generativeai.google
+GENERATIVE_AI = 7
 end
 
 # To where should client libraries be published?
 
data/proto_docs/google/protobuf/any.rb
CHANGED
@@ -43,8 +43,12 @@ module Google
 # if (any.is(Foo.class)) {
 # foo = any.unpack(Foo.class);
 # }
+# // or ...
+# if (any.isSameTypeAs(Foo.getDefaultInstance())) {
+# foo = any.unpack(Foo.getDefaultInstance());
+# }
 #
-#
+# Example 3: Pack and unpack a message in Python.
 #
 # foo = Foo(...)
 # any = Any()
@@ -54,7 +58,7 @@ module Google
 # any.Unpack(foo)
 # ...
 #
-#
+# Example 4: Pack and unpack a message in Go
 #
 # foo := &pb.Foo{...}
 # any, err := anypb.New(foo)
@@ -73,9 +77,8 @@ module Google
 # in the type URL, for example "foo.bar.com/x/y.z" will yield type
 # name "y.z".
 #
-#
 # JSON
-#
+# ====
 # The JSON representation of an `Any` value uses the regular
 # representation of the deserialized, embedded message, with an
 # additional field `@type` which contains the type URL. Example:
 
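
The Any documentation above gives Java, Python, and Go samples; the Ruby equivalent (the language of this gem) lives in the google-protobuf well-known-types helpers. A sketch, assuming the standard Any.pack / #unpack / #is helpers from that extension:

    require "google/protobuf/well_known_types"
    require "google/rpc/status_pb"

    status = ::Google::Rpc::Status.new code: 3, message: "bad request"
    any    = ::Google::Protobuf::Any.pack(status)   # wraps the message with a type.googleapis.com/... type URL

    if any.is(::Google::Rpc::Status)
      unpacked = any.unpack(::Google::Rpc::Status)  # returns the Status, or nil on a type mismatch
    end
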
data/proto_docs/google/protobuf/timestamp.rb
CHANGED
@@ -69,7 +69,6 @@ module Google
 # Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
 # .setNanos((int) ((millis % 1000) * 1000000)).build();
 #
-#
 # Example 5: Compute Timestamp from Java `Instant.now()`.
 #
 # Instant now = Instant.now();
@@ -78,7 +77,6 @@ module Google
 # Timestamp.newBuilder().setSeconds(now.getEpochSecond())
 # .setNanos(now.getNano()).build();
 #
-#
 # Example 6: Compute Timestamp from current time in Python.
 #
 # timestamp = Timestamp()
@@ -108,7 +106,7 @@ module Google
 # [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
 # the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
 # the Joda Time's [`ISODateTimeFormat.dateTime()`](
-# http://
+# http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
 # ) to obtain a formatter capable of generating timestamps in this format.
 # @!attribute [rw] seconds
 # @return [::Integer]
 
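
Alongside the Java and Python examples in the Timestamp docs above, the Ruby form (this gem's language) is direct; a sketch using the google-protobuf well-known-types helpers:

    require "google/protobuf/well_known_types"

    now = Time.now
    ts  = ::Google::Protobuf::Timestamp.new seconds: now.to_i, nanos: now.nsec
    ts.to_time   # well-known-types helper: converts back to a Ruby Time
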
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: google-cloud-video_intelligence-v1p3beta1
 version: !ruby/object:Gem::Version
-version: 0.
+version: 0.4.0
 platform: ruby
 authors:
 - Google LLC
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2023-
+date: 2023-05-31 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
 name: gapic-common
@@ -16,7 +16,7 @@ dependencies:
 requirements:
 - - ">="
 - !ruby/object:Gem::Version
-version: 0.
+version: 0.19.0
 - - "<"
 - !ruby/object:Gem::Version
 version: 2.a
@@ -26,7 +26,7 @@ dependencies:
 requirements:
 - - ">="
 - !ruby/object:Gem::Version
-version: 0.
+version: 0.19.0
 - - "<"
 - !ruby/object:Gem::Version
 version: 2.a
@@ -222,5 +222,7 @@ requirements: []
 rubygems_version: 3.4.2
 signing_key:
 specification_version: 4
-summary:
+summary: Detects objects, explicit content, and scene changes in videos. It also specifies
+the region for annotation and transcribes speech to text. Supports both asynchronous
+API and streaming API.
 test_files: []
 
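
Given the gemspec changes above (gem version 0.4.0, gapic-common constrained to >= 0.19.0 and < 2.a), picking up this release in an application is the usual Gemfile bump; a sketch:

    # Gemfile
    source "https://rubygems.org"

    gem "google-cloud-video_intelligence-v1p3beta1", "~> 0.4"
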