google-cloud-video_intelligence-v1 0.8.0 → 0.9.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 23a5d29841f0c439de320561b22e8d890c4ba12caa22b37b1ab3ab1eb8dfbeae
4
- data.tar.gz: a0d5b167c24306398f24237976e229a1ef99ce06a71eca66a353a8fe9bb8ac2d
3
+ metadata.gz: 51d3fc1e7535a46e4cd35ddf1e10baa5b90ccfe05c7df432ee6edf64841b42eb
4
+ data.tar.gz: d9ae050fcff1610885a72a94ed340b7fba6907aad7f653bfc8ced22d9d18ecea
5
5
  SHA512:
6
- metadata.gz: 825409b720f1a58a13693b8449a247795e0b038b602717bdbbb359b301c71ed562417adaa3ead09a61d5a75724eb76cef37ce0a8d457b91cf7fb409200cfecd9
7
- data.tar.gz: 923f66550fccaf72199e09656242346bbc176190b0d0dce4e5be553ce237fc563a2e47297e08fc00caaee0a0f2220bd9e706e65712391e1b3718d6788a1514cf
6
+ metadata.gz: 9fdda7bca0a6f2fc15fe8bc40dc07823c40324d6765bf07a0d8647fd68a240650cc4b54f08ea44e004f9533ac353870c51bc3c207cdc5bd97cfe4d2532d0953b
7
+ data.tar.gz: 26f7e0283572f38f9d4bf0af2e37b0788a9141d849ef05c72413eea5a36e3800054d162499e8f8a99f540eecdfd432d45aba2dff5156e4f24c59acf475fca247
data/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # Ruby Client for the Cloud Video Intelligence V1 API
2
2
 
3
- API Client library for the Cloud Video Intelligence V1 API
3
+ Detects objects, explicit content, and scene changes in videos. It also specifies the region for annotation and transcribes speech to text. Supports both asynchronous API and streaming API.
4
4
 
5
5
  Detects objects, explicit content, and scene changes in videos. It also specifies the region for annotation and transcribes speech to text. Supports both asynchronous API and streaming API.
6
6
 
@@ -47,7 +47,7 @@ for general usage information.
47
47
 
48
48
  To enable logging for this library, set the logger for the underlying [gRPC](https://github.com/grpc/grpc/tree/master/src/ruby) library.
49
49
  The logger that you set may be a Ruby stdlib [`Logger`](https://ruby-doc.org/current/stdlibs/logger/Logger.html) as shown below,
50
- or a [`Google::Cloud::Logging::Logger`](https://googleapis.dev/ruby/google-cloud-logging/latest)
50
+ or a [`Google::Cloud::Logging::Logger`](https://cloud.google.com/ruby/docs/reference/google-cloud-logging/latest)
51
51
  that will write logs to [Cloud Logging](https://cloud.google.com/logging/). See [grpc/logconfig.rb](https://github.com/grpc/grpc/blob/master/src/ruby/lib/grpc/logconfig.rb)
52
52
  and the gRPC [spec_helper.rb](https://github.com/grpc/grpc/blob/master/src/ruby/spec/spec_helper.rb) for additional information.
53
53
 
@@ -21,7 +21,7 @@ module Google
21
21
  module Cloud
22
22
  module VideoIntelligence
23
23
  module V1
24
- VERSION = "0.8.0"
24
+ VERSION = "0.9.0"
25
25
  end
26
26
  end
27
27
  end
@@ -317,9 +317,9 @@ module Google
317
317
  # * (`String`) The path to a service account key file in JSON format
318
318
  # * (`Hash`) A service account key as a Hash
319
319
  # * (`Google::Auth::Credentials`) A googleauth credentials object
320
- # (see the [googleauth docs](https://googleapis.dev/ruby/googleauth/latest/index.html))
320
+ # (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
321
321
  # * (`Signet::OAuth2::Client`) A signet oauth2 client object
322
- # (see the [signet docs](https://googleapis.dev/ruby/signet/latest/Signet/OAuth2/Client.html))
322
+ # (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
323
323
  # * (`GRPC::Core::Channel`) a gRPC channel with included credentials
324
324
  # * (`GRPC::Core::ChannelCredentials`) a gRPC credentials object
325
325
  # * (`nil`) indicating no credentials
@@ -620,9 +620,9 @@ module Google
620
620
  # * (`String`) The path to a service account key file in JSON format
621
621
  # * (`Hash`) A service account key as a Hash
622
622
  # * (`Google::Auth::Credentials`) A googleauth credentials object
623
- # (see the [googleauth docs](https://googleapis.dev/ruby/googleauth/latest/index.html))
623
+ # (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
624
624
  # * (`Signet::OAuth2::Client`) A signet oauth2 client object
625
- # (see the [signet docs](https://googleapis.dev/ruby/signet/latest/Signet/OAuth2/Client.html))
625
+ # (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
626
626
  # * (`GRPC::Core::Channel`) a gRPC channel with included credentials
627
627
  # * (`GRPC::Core::ChannelCredentials`) a gRPC credentials object
628
628
  # * (`nil`) indicating no credentials
@@ -286,9 +286,9 @@ module Google
286
286
  # * (`String`) The path to a service account key file in JSON format
287
287
  # * (`Hash`) A service account key as a Hash
288
288
  # * (`Google::Auth::Credentials`) A googleauth credentials object
289
- # (see the [googleauth docs](https://googleapis.dev/ruby/googleauth/latest/index.html))
289
+ # (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
290
290
  # * (`Signet::OAuth2::Client`) A signet oauth2 client object
291
- # (see the [signet docs](https://googleapis.dev/ruby/signet/latest/Signet/OAuth2/Client.html))
291
+ # (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
292
292
  # * (`nil`) indicating no credentials
293
293
  # @return [::Object]
294
294
  # @!attribute [rw] scope
@@ -411,9 +411,9 @@ module Google
411
411
  # * (`String`) The path to a service account key file in JSON format
412
412
  # * (`Hash`) A service account key as a Hash
413
413
  # * (`Google::Auth::Credentials`) A googleauth credentials object
414
- # (see the [googleauth docs](https://googleapis.dev/ruby/googleauth/latest/index.html))
414
+ # (see the [googleauth docs](https://rubydoc.info/gems/googleauth/Google/Auth/Credentials))
415
415
  # * (`Signet::OAuth2::Client`) A signet oauth2 client object
416
- # (see the [signet docs](https://googleapis.dev/ruby/signet/latest/Signet/OAuth2/Client.html))
416
+ # (see the [signet docs](https://rubydoc.info/gems/signet/Signet/OAuth2/Client))
417
417
  # * (`nil`) indicating no credentials
418
418
  # @return [::Object]
419
419
  # @!attribute [rw] scope
@@ -1,3 +1,4 @@
1
+ # frozen_string_literal: true
1
2
  # Generated by the protocol buffer compiler. DO NOT EDIT!
2
3
  # source: google/cloud/videointelligence/v1/video_intelligence.proto
3
4
 
@@ -11,268 +12,34 @@ require 'google/protobuf/duration_pb'
11
12
  require 'google/protobuf/timestamp_pb'
12
13
  require 'google/rpc/status_pb'
13
14
 
14
- Google::Protobuf::DescriptorPool.generated_pool.build do
15
- add_file("google/cloud/videointelligence/v1/video_intelligence.proto", :syntax => :proto3) do
16
- add_message "google.cloud.videointelligence.v1.AnnotateVideoRequest" do
17
- optional :input_uri, :string, 1
18
- optional :input_content, :bytes, 6
19
- repeated :features, :enum, 2, "google.cloud.videointelligence.v1.Feature"
20
- optional :video_context, :message, 3, "google.cloud.videointelligence.v1.VideoContext"
21
- optional :output_uri, :string, 4
22
- optional :location_id, :string, 5
23
- end
24
- add_message "google.cloud.videointelligence.v1.VideoContext" do
25
- repeated :segments, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
26
- optional :label_detection_config, :message, 2, "google.cloud.videointelligence.v1.LabelDetectionConfig"
27
- optional :shot_change_detection_config, :message, 3, "google.cloud.videointelligence.v1.ShotChangeDetectionConfig"
28
- optional :explicit_content_detection_config, :message, 4, "google.cloud.videointelligence.v1.ExplicitContentDetectionConfig"
29
- optional :face_detection_config, :message, 5, "google.cloud.videointelligence.v1.FaceDetectionConfig"
30
- optional :speech_transcription_config, :message, 6, "google.cloud.videointelligence.v1.SpeechTranscriptionConfig"
31
- optional :text_detection_config, :message, 8, "google.cloud.videointelligence.v1.TextDetectionConfig"
32
- optional :person_detection_config, :message, 11, "google.cloud.videointelligence.v1.PersonDetectionConfig"
33
- optional :object_tracking_config, :message, 13, "google.cloud.videointelligence.v1.ObjectTrackingConfig"
34
- end
35
- add_message "google.cloud.videointelligence.v1.LabelDetectionConfig" do
36
- optional :label_detection_mode, :enum, 1, "google.cloud.videointelligence.v1.LabelDetectionMode"
37
- optional :stationary_camera, :bool, 2
38
- optional :model, :string, 3
39
- optional :frame_confidence_threshold, :float, 4
40
- optional :video_confidence_threshold, :float, 5
41
- end
42
- add_message "google.cloud.videointelligence.v1.ShotChangeDetectionConfig" do
43
- optional :model, :string, 1
44
- end
45
- add_message "google.cloud.videointelligence.v1.ObjectTrackingConfig" do
46
- optional :model, :string, 1
47
- end
48
- add_message "google.cloud.videointelligence.v1.FaceDetectionConfig" do
49
- optional :model, :string, 1
50
- optional :include_bounding_boxes, :bool, 2
51
- optional :include_attributes, :bool, 5
52
- end
53
- add_message "google.cloud.videointelligence.v1.PersonDetectionConfig" do
54
- optional :include_bounding_boxes, :bool, 1
55
- optional :include_pose_landmarks, :bool, 2
56
- optional :include_attributes, :bool, 3
57
- end
58
- add_message "google.cloud.videointelligence.v1.ExplicitContentDetectionConfig" do
59
- optional :model, :string, 1
60
- end
61
- add_message "google.cloud.videointelligence.v1.TextDetectionConfig" do
62
- repeated :language_hints, :string, 1
63
- optional :model, :string, 2
64
- end
65
- add_message "google.cloud.videointelligence.v1.VideoSegment" do
66
- optional :start_time_offset, :message, 1, "google.protobuf.Duration"
67
- optional :end_time_offset, :message, 2, "google.protobuf.Duration"
68
- end
69
- add_message "google.cloud.videointelligence.v1.LabelSegment" do
70
- optional :segment, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
71
- optional :confidence, :float, 2
72
- end
73
- add_message "google.cloud.videointelligence.v1.LabelFrame" do
74
- optional :time_offset, :message, 1, "google.protobuf.Duration"
75
- optional :confidence, :float, 2
76
- end
77
- add_message "google.cloud.videointelligence.v1.Entity" do
78
- optional :entity_id, :string, 1
79
- optional :description, :string, 2
80
- optional :language_code, :string, 3
81
- end
82
- add_message "google.cloud.videointelligence.v1.LabelAnnotation" do
83
- optional :entity, :message, 1, "google.cloud.videointelligence.v1.Entity"
84
- repeated :category_entities, :message, 2, "google.cloud.videointelligence.v1.Entity"
85
- repeated :segments, :message, 3, "google.cloud.videointelligence.v1.LabelSegment"
86
- repeated :frames, :message, 4, "google.cloud.videointelligence.v1.LabelFrame"
87
- optional :version, :string, 5
88
- end
89
- add_message "google.cloud.videointelligence.v1.ExplicitContentFrame" do
90
- optional :time_offset, :message, 1, "google.protobuf.Duration"
91
- optional :pornography_likelihood, :enum, 2, "google.cloud.videointelligence.v1.Likelihood"
92
- end
93
- add_message "google.cloud.videointelligence.v1.ExplicitContentAnnotation" do
94
- repeated :frames, :message, 1, "google.cloud.videointelligence.v1.ExplicitContentFrame"
95
- optional :version, :string, 2
96
- end
97
- add_message "google.cloud.videointelligence.v1.NormalizedBoundingBox" do
98
- optional :left, :float, 1
99
- optional :top, :float, 2
100
- optional :right, :float, 3
101
- optional :bottom, :float, 4
102
- end
103
- add_message "google.cloud.videointelligence.v1.FaceDetectionAnnotation" do
104
- repeated :tracks, :message, 3, "google.cloud.videointelligence.v1.Track"
105
- optional :thumbnail, :bytes, 4
106
- optional :version, :string, 5
107
- end
108
- add_message "google.cloud.videointelligence.v1.PersonDetectionAnnotation" do
109
- repeated :tracks, :message, 1, "google.cloud.videointelligence.v1.Track"
110
- optional :version, :string, 2
111
- end
112
- add_message "google.cloud.videointelligence.v1.FaceSegment" do
113
- optional :segment, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
114
- end
115
- add_message "google.cloud.videointelligence.v1.FaceFrame" do
116
- repeated :normalized_bounding_boxes, :message, 1, "google.cloud.videointelligence.v1.NormalizedBoundingBox"
117
- optional :time_offset, :message, 2, "google.protobuf.Duration"
118
- end
119
- add_message "google.cloud.videointelligence.v1.FaceAnnotation" do
120
- optional :thumbnail, :bytes, 1
121
- repeated :segments, :message, 2, "google.cloud.videointelligence.v1.FaceSegment"
122
- repeated :frames, :message, 3, "google.cloud.videointelligence.v1.FaceFrame"
123
- end
124
- add_message "google.cloud.videointelligence.v1.TimestampedObject" do
125
- optional :normalized_bounding_box, :message, 1, "google.cloud.videointelligence.v1.NormalizedBoundingBox"
126
- optional :time_offset, :message, 2, "google.protobuf.Duration"
127
- repeated :attributes, :message, 3, "google.cloud.videointelligence.v1.DetectedAttribute"
128
- repeated :landmarks, :message, 4, "google.cloud.videointelligence.v1.DetectedLandmark"
129
- end
130
- add_message "google.cloud.videointelligence.v1.Track" do
131
- optional :segment, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
132
- repeated :timestamped_objects, :message, 2, "google.cloud.videointelligence.v1.TimestampedObject"
133
- repeated :attributes, :message, 3, "google.cloud.videointelligence.v1.DetectedAttribute"
134
- optional :confidence, :float, 4
135
- end
136
- add_message "google.cloud.videointelligence.v1.DetectedAttribute" do
137
- optional :name, :string, 1
138
- optional :confidence, :float, 2
139
- optional :value, :string, 3
140
- end
141
- add_message "google.cloud.videointelligence.v1.DetectedLandmark" do
142
- optional :name, :string, 1
143
- optional :point, :message, 2, "google.cloud.videointelligence.v1.NormalizedVertex"
144
- optional :confidence, :float, 3
145
- end
146
- add_message "google.cloud.videointelligence.v1.VideoAnnotationResults" do
147
- optional :input_uri, :string, 1
148
- optional :segment, :message, 10, "google.cloud.videointelligence.v1.VideoSegment"
149
- repeated :segment_label_annotations, :message, 2, "google.cloud.videointelligence.v1.LabelAnnotation"
150
- repeated :segment_presence_label_annotations, :message, 23, "google.cloud.videointelligence.v1.LabelAnnotation"
151
- repeated :shot_label_annotations, :message, 3, "google.cloud.videointelligence.v1.LabelAnnotation"
152
- repeated :shot_presence_label_annotations, :message, 24, "google.cloud.videointelligence.v1.LabelAnnotation"
153
- repeated :frame_label_annotations, :message, 4, "google.cloud.videointelligence.v1.LabelAnnotation"
154
- repeated :face_annotations, :message, 5, "google.cloud.videointelligence.v1.FaceAnnotation"
155
- repeated :face_detection_annotations, :message, 13, "google.cloud.videointelligence.v1.FaceDetectionAnnotation"
156
- repeated :shot_annotations, :message, 6, "google.cloud.videointelligence.v1.VideoSegment"
157
- optional :explicit_annotation, :message, 7, "google.cloud.videointelligence.v1.ExplicitContentAnnotation"
158
- repeated :speech_transcriptions, :message, 11, "google.cloud.videointelligence.v1.SpeechTranscription"
159
- repeated :text_annotations, :message, 12, "google.cloud.videointelligence.v1.TextAnnotation"
160
- repeated :object_annotations, :message, 14, "google.cloud.videointelligence.v1.ObjectTrackingAnnotation"
161
- repeated :logo_recognition_annotations, :message, 19, "google.cloud.videointelligence.v1.LogoRecognitionAnnotation"
162
- repeated :person_detection_annotations, :message, 20, "google.cloud.videointelligence.v1.PersonDetectionAnnotation"
163
- optional :error, :message, 9, "google.rpc.Status"
164
- end
165
- add_message "google.cloud.videointelligence.v1.AnnotateVideoResponse" do
166
- repeated :annotation_results, :message, 1, "google.cloud.videointelligence.v1.VideoAnnotationResults"
167
- end
168
- add_message "google.cloud.videointelligence.v1.VideoAnnotationProgress" do
169
- optional :input_uri, :string, 1
170
- optional :progress_percent, :int32, 2
171
- optional :start_time, :message, 3, "google.protobuf.Timestamp"
172
- optional :update_time, :message, 4, "google.protobuf.Timestamp"
173
- optional :feature, :enum, 5, "google.cloud.videointelligence.v1.Feature"
174
- optional :segment, :message, 6, "google.cloud.videointelligence.v1.VideoSegment"
175
- end
176
- add_message "google.cloud.videointelligence.v1.AnnotateVideoProgress" do
177
- repeated :annotation_progress, :message, 1, "google.cloud.videointelligence.v1.VideoAnnotationProgress"
178
- end
179
- add_message "google.cloud.videointelligence.v1.SpeechTranscriptionConfig" do
180
- optional :language_code, :string, 1
181
- optional :max_alternatives, :int32, 2
182
- optional :filter_profanity, :bool, 3
183
- repeated :speech_contexts, :message, 4, "google.cloud.videointelligence.v1.SpeechContext"
184
- optional :enable_automatic_punctuation, :bool, 5
185
- repeated :audio_tracks, :int32, 6
186
- optional :enable_speaker_diarization, :bool, 7
187
- optional :diarization_speaker_count, :int32, 8
188
- optional :enable_word_confidence, :bool, 9
189
- end
190
- add_message "google.cloud.videointelligence.v1.SpeechContext" do
191
- repeated :phrases, :string, 1
192
- end
193
- add_message "google.cloud.videointelligence.v1.SpeechTranscription" do
194
- repeated :alternatives, :message, 1, "google.cloud.videointelligence.v1.SpeechRecognitionAlternative"
195
- optional :language_code, :string, 2
196
- end
197
- add_message "google.cloud.videointelligence.v1.SpeechRecognitionAlternative" do
198
- optional :transcript, :string, 1
199
- optional :confidence, :float, 2
200
- repeated :words, :message, 3, "google.cloud.videointelligence.v1.WordInfo"
201
- end
202
- add_message "google.cloud.videointelligence.v1.WordInfo" do
203
- optional :start_time, :message, 1, "google.protobuf.Duration"
204
- optional :end_time, :message, 2, "google.protobuf.Duration"
205
- optional :word, :string, 3
206
- optional :confidence, :float, 4
207
- optional :speaker_tag, :int32, 5
208
- end
209
- add_message "google.cloud.videointelligence.v1.NormalizedVertex" do
210
- optional :x, :float, 1
211
- optional :y, :float, 2
212
- end
213
- add_message "google.cloud.videointelligence.v1.NormalizedBoundingPoly" do
214
- repeated :vertices, :message, 1, "google.cloud.videointelligence.v1.NormalizedVertex"
215
- end
216
- add_message "google.cloud.videointelligence.v1.TextSegment" do
217
- optional :segment, :message, 1, "google.cloud.videointelligence.v1.VideoSegment"
218
- optional :confidence, :float, 2
219
- repeated :frames, :message, 3, "google.cloud.videointelligence.v1.TextFrame"
220
- end
221
- add_message "google.cloud.videointelligence.v1.TextFrame" do
222
- optional :rotated_bounding_box, :message, 1, "google.cloud.videointelligence.v1.NormalizedBoundingPoly"
223
- optional :time_offset, :message, 2, "google.protobuf.Duration"
224
- end
225
- add_message "google.cloud.videointelligence.v1.TextAnnotation" do
226
- optional :text, :string, 1
227
- repeated :segments, :message, 2, "google.cloud.videointelligence.v1.TextSegment"
228
- optional :version, :string, 3
229
- end
230
- add_message "google.cloud.videointelligence.v1.ObjectTrackingFrame" do
231
- optional :normalized_bounding_box, :message, 1, "google.cloud.videointelligence.v1.NormalizedBoundingBox"
232
- optional :time_offset, :message, 2, "google.protobuf.Duration"
233
- end
234
- add_message "google.cloud.videointelligence.v1.ObjectTrackingAnnotation" do
235
- optional :entity, :message, 1, "google.cloud.videointelligence.v1.Entity"
236
- optional :confidence, :float, 4
237
- repeated :frames, :message, 2, "google.cloud.videointelligence.v1.ObjectTrackingFrame"
238
- optional :version, :string, 6
239
- oneof :track_info do
240
- optional :segment, :message, 3, "google.cloud.videointelligence.v1.VideoSegment"
241
- optional :track_id, :int64, 5
242
- end
243
- end
244
- add_message "google.cloud.videointelligence.v1.LogoRecognitionAnnotation" do
245
- optional :entity, :message, 1, "google.cloud.videointelligence.v1.Entity"
246
- repeated :tracks, :message, 2, "google.cloud.videointelligence.v1.Track"
247
- repeated :segments, :message, 3, "google.cloud.videointelligence.v1.VideoSegment"
248
- end
249
- add_enum "google.cloud.videointelligence.v1.Feature" do
250
- value :FEATURE_UNSPECIFIED, 0
251
- value :LABEL_DETECTION, 1
252
- value :SHOT_CHANGE_DETECTION, 2
253
- value :EXPLICIT_CONTENT_DETECTION, 3
254
- value :FACE_DETECTION, 4
255
- value :SPEECH_TRANSCRIPTION, 6
256
- value :TEXT_DETECTION, 7
257
- value :OBJECT_TRACKING, 9
258
- value :LOGO_RECOGNITION, 12
259
- value :PERSON_DETECTION, 14
260
- end
261
- add_enum "google.cloud.videointelligence.v1.LabelDetectionMode" do
262
- value :LABEL_DETECTION_MODE_UNSPECIFIED, 0
263
- value :SHOT_MODE, 1
264
- value :FRAME_MODE, 2
265
- value :SHOT_AND_FRAME_MODE, 3
266
- end
267
- add_enum "google.cloud.videointelligence.v1.Likelihood" do
268
- value :LIKELIHOOD_UNSPECIFIED, 0
269
- value :VERY_UNLIKELY, 1
270
- value :UNLIKELY, 2
271
- value :POSSIBLE, 3
272
- value :LIKELY, 4
273
- value :VERY_LIKELY, 5
15
+
16
+ descriptor_data = "\n:google/cloud/videointelligence/v1/video_intelligence.proto\x12!google.cloud.videointelligence.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xfe\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x41\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32*.google.cloud.videointelligence.v1.FeatureB\x03\xe0\x41\x02\x12\x46\n\rvideo_context\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01\"\xc1\x06\n\x0cVideoContext\x12\x41\n\x08segments\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12W\n\x16label_detection_config\x18\x02 \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.LabelDetectionConfig\x12\x62\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ShotChangeDetectionConfig\x12l\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1.ExplicitContentDetectionConfig\x12U\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.FaceDetectionConfig\x12\x61\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32<.google.cloud.videointelligence.v1.SpeechTranscriptionConfig\x12U\n\x15text_detection_config\x18\x08 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.TextDetectionConfig\x12Y\n\x17person_detection_config\x18\x0b \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.PersonDetectionConfig\x12W\n\x16object_tracking_config\x18\r \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.ObjectTrackingConfig\"\xdd\x01\n\x14LabelDetectionConfig\x12S\n\x14label_detection_mode\x18\x01 
\x01(\x0e\x32\x35.google.cloud.videointelligence.v1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12\"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12\"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02\"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t\"`\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x05 \x01(\x08\"s\n\x15PersonDetectionConfig\x12\x1e\n\x16include_bounding_boxes\x18\x01 \x01(\x08\x12\x1e\n\x16include_pose_landmarks\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x03 \x01(\x08\"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t\"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"d\n\x0cLabelSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02\"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t\"\xa5\x02\n\x0fLabelAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x44\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.LabelSegment\x12=\n\x06\x66rames\x18\x04 \x03(\x0b\x32-.google.cloud.videointelligence.v1.LabelFrame\x12\x0f\n\x07version\x18\x05 
\x01(\t\"\x95\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32-.google.cloud.videointelligence.v1.Likelihood\"u\n\x19\x45xplicitContentAnnotation\x12G\n\x06\x66rames\x18\x01 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1.ExplicitContentFrame\x12\x0f\n\x07version\x18\x02 \x01(\t\"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02\"w\n\x17\x46\x61\x63\x65\x44\x65tectionAnnotation\x12\x38\n\x06tracks\x18\x03 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x11\n\tthumbnail\x18\x04 \x01(\x0c\x12\x0f\n\x07version\x18\x05 \x01(\t\"f\n\x19PersonDetectionAnnotation\x12\x38\n\x06tracks\x18\x01 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x0f\n\x07version\x18\x02 \x01(\t\"O\n\x0b\x46\x61\x63\x65Segment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\"\x9c\x01\n\tFaceFrame\x12[\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration:\x02\x18\x01\"\xa7\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.FaceSegment\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.FaceFrame:\x02\x18\x01\"\xba\x02\n\x11TimestampedObject\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12K\n\tlandmarks\x18\x04 
\x03(\x0b\x32\x33.google.cloud.videointelligence.v1.DetectedLandmarkB\x03\xe0\x41\x01\"\x84\x02\n\x05Track\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Q\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.TimestampedObject\x12M\n\nattributes\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01\"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t\"x\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x42\n\x05point\x18\x02 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02\"\xe9\n\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12@\n\x07segment\x18\n \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12U\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12^\n\"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12R\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12[\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12S\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12O\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.FaceAnnotationB\x02\x18\x01\x12^\n\x1a\x66\x61\x63\x65_detection_annotations\x18\r \x03(\x0b\x32:.google.cloud.videointelligence.v1.FaceDetectionAnnotation\x12I\n\x10shot_annotations\x18\x06 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Y\n\x13\x65xplicit_annotation\x18\x07 
\x01(\x0b\x32<.google.cloud.videointelligence.v1.ExplicitContentAnnotation\x12U\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.SpeechTranscription\x12K\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.TextAnnotation\x12W\n\x12object_annotations\x18\x0e \x03(\x0b\x32;.google.cloud.videointelligence.v1.ObjectTrackingAnnotation\x12\x62\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32<.google.cloud.videointelligence.v1.LogoRecognitionAnnotation\x12\x62\n\x1cperson_detection_annotations\x18\x14 \x03(\x0b\x32<.google.cloud.videointelligence.v1.PersonDetectionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status\"n\n\x15\x41nnotateVideoResponse\x12U\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1.VideoAnnotationResults\"\xa6\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12;\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32*.google.cloud.videointelligence.v1.Feature\x12@\n\x07segment\x18\x06 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\"p\n\x15\x41nnotateVideoProgress\x12W\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1.VideoAnnotationProgress\"\x81\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12N\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 
\x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01\"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01\"\x88\x01\n\x13SpeechTranscription\x12U\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03\"\x8c\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12?\n\x05words\x18\x03 \x03(\x0b\x32+.google.cloud.videointelligence.v1.WordInfoB\x03\xe0\x41\x03\"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03\"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\"_\n\x16NormalizedBoundingPoly\x12\x45\n\x08vertices\x18\x01 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex\"\xa1\x01\n\x0bTextSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.TextFrame\"\x94\x01\n\tTextFrame\x12W\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32\x39.google.cloud.videointelligence.v1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"q\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.TextSegment\x12\x0f\n\x07version\x18\x03 
\x01(\t\"\xa0\x01\n\x13ObjectTrackingFrame\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xa8\x02\n\x18ObjectTrackingAnnotation\x12\x42\n\x07segment\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x46\n\x06\x66rames\x18\x02 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.ObjectTrackingFrame\x12\x0f\n\x07version\x18\x06 \x01(\tB\x0c\n\ntrack_info\"\xd3\x01\n\x19LogoRecognitionAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x38\n\x06tracks\x18\x02 \x03(\x0b\x32(.google.cloud.videointelligence.v1.Track\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment*\xf5\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x14\n\x10PERSON_DETECTION\x10\x0e*r\n\x12LabelDetectionMode\x12$\n 
LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xc0\x02\n\x18VideoIntelligenceService\x12\xcd\x01\n\rAnnotateVideo\x12\x37.google.cloud.videointelligence.v1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation\"d\x82\xd3\xe4\x93\x02\x18\"\x13/v1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x8c\x02\n%com.google.cloud.videointelligence.v1B\x1dVideoIntelligenceServiceProtoP\x01ZScloud.google.com/go/videointelligence/apiv1/videointelligencepb;videointelligencepb\xaa\x02!Google.Cloud.VideoIntelligence.V1\xca\x02!Google\\Cloud\\VideoIntelligence\\V1\xea\x02$Google::Cloud::VideoIntelligence::V1b\x06proto3"
17
+
18
+ pool = Google::Protobuf::DescriptorPool.generated_pool
19
+
20
+ begin
21
+ pool.add_serialized_file(descriptor_data)
22
+ rescue TypeError => e
23
+ # Compatibility code: will be removed in the next major version.
24
+ require 'google/protobuf/descriptor_pb'
25
+ parsed = Google::Protobuf::FileDescriptorProto.decode(descriptor_data)
26
+ parsed.clear_dependency
27
+ serialized = parsed.class.encode(parsed)
28
+ file = pool.add_serialized_file(serialized)
29
+ warn "Warning: Protobuf detected an import path issue while loading generated file #{__FILE__}"
30
+ imports = [
31
+ ["google.protobuf.Duration", "google/protobuf/duration.proto"],
32
+ ["google.rpc.Status", "google/rpc/status.proto"],
33
+ ["google.protobuf.Timestamp", "google/protobuf/timestamp.proto"],
34
+ ]
35
+ imports.each do |type_name, expected_filename|
36
+ import_file = pool.lookup(type_name).file_descriptor
37
+ if import_file.name != expected_filename
38
+ warn "- #{file.name} imports #{expected_filename}, but that import was loaded as #{import_file.name}"
274
39
  end
275
40
  end
41
+ warn "Each proto file must use a consistent fully-qualified name."
42
+ warn "This will become an error in the next major version."
276
43
  end
277
44
 
278
45
  module Google
@@ -35,7 +35,9 @@ module Google
35
35
  # Details about how and where to publish client libraries.
36
36
  # @!attribute [rw] version
37
37
  # @return [::String]
38
- # Version of the API to apply these settings to.
38
+ # Version of the API to apply these settings to. This is the full protobuf
39
+ # package for the API, ending in the version element.
40
+ # Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1".
39
41
  # @!attribute [rw] launch_stage
40
42
  # @return [::Google::Api::LaunchStage]
41
43
  # Launch stage of this version of the API.
@@ -81,7 +83,7 @@ module Google
81
83
  # long-running operation pattern.
82
84
  # @!attribute [rw] new_issue_uri
83
85
  # @return [::String]
84
- # Link to a place that API users can report issues. Example:
86
+ # Link to a *public* URI where users can report issues. Example:
85
87
  # https://issuetracker.google.com/issues/new?component=190865&template=1161103
86
88
  # @!attribute [rw] documentation_uri
87
89
  # @return [::String]
@@ -111,6 +113,10 @@ module Google
111
113
  # Client library settings. If the same version string appears multiple
112
114
  # times in this list, then the last one wins. Settings from earlier
113
115
  # settings with the same version string are discarded.
116
+ # @!attribute [rw] proto_reference_documentation_uri
117
+ # @return [::String]
118
+ # Optional link to proto reference documentation. Example:
119
+ # https://cloud.google.com/pubsub/lite/docs/reference/rpc
114
120
  class Publishing
115
121
  include ::Google::Protobuf::MessageExts
116
122
  extend ::Google::Protobuf::MessageExts::ClassMethods
@@ -203,9 +209,57 @@ module Google
203
209
  # @!attribute [rw] common
204
210
  # @return [::Google::Api::CommonLanguageSettings]
205
211
  # Some settings.
212
+ # @!attribute [rw] renamed_services
213
+ # @return [::Google::Protobuf::Map{::String => ::String}]
214
+ # Map from original service names to renamed versions.
215
+ # This is used when the default generated types
216
+ # would cause a naming conflict. (Neither name is
217
+ # fully-qualified.)
218
+ # Example: Subscriber to SubscriberServiceApi.
219
+ # @!attribute [rw] renamed_resources
220
+ # @return [::Google::Protobuf::Map{::String => ::String}]
221
+ # Map from full resource types to the effective short name
222
+ # for the resource. This is used when otherwise resource
223
+ # named from different services would cause naming collisions.
224
+ # Example entry:
225
+ # "datalabeling.googleapis.com/Dataset": "DataLabelingDataset"
226
+ # @!attribute [rw] ignored_resources
227
+ # @return [::Array<::String>]
228
+ # List of full resource types to ignore during generation.
229
+ # This is typically used for API-specific Location resources,
230
+ # which should be handled by the generator as if they were actually
231
+ # the common Location resources.
232
+ # Example entry: "documentai.googleapis.com/Location"
233
+ # @!attribute [rw] forced_namespace_aliases
234
+ # @return [::Array<::String>]
235
+ # Namespaces which must be aliased in snippets due to
236
+ # a known (but non-generator-predictable) naming collision
237
+ # @!attribute [rw] handwritten_signatures
238
+ # @return [::Array<::String>]
239
+ # Method signatures (in the form "service.method(signature)")
240
+ # which are provided separately, so shouldn't be generated.
241
+ # Snippets *calling* these methods are still generated, however.
206
242
  class DotnetSettings
207
243
  include ::Google::Protobuf::MessageExts
208
244
  extend ::Google::Protobuf::MessageExts::ClassMethods
245
+
246
+ # @!attribute [rw] key
247
+ # @return [::String]
248
+ # @!attribute [rw] value
249
+ # @return [::String]
250
+ class RenamedServicesEntry
251
+ include ::Google::Protobuf::MessageExts
252
+ extend ::Google::Protobuf::MessageExts::ClassMethods
253
+ end
254
+
255
+ # @!attribute [rw] key
256
+ # @return [::String]
257
+ # @!attribute [rw] value
258
+ # @return [::String]
259
+ class RenamedResourcesEntry
260
+ include ::Google::Protobuf::MessageExts
261
+ extend ::Google::Protobuf::MessageExts::ClassMethods
262
+ end
209
263
  end
210
264
 
211
265
  # Settings for Ruby client libraries.
@@ -240,8 +294,8 @@ module Google
240
294
  # Example of a YAML configuration::
241
295
  #
242
296
  # publishing:
243
- # method_behavior:
244
- # - selector: CreateAdDomain
297
+ # method_settings:
298
+ # - selector: google.cloud.speech.v2.Speech.BatchRecognize
245
299
  # long_running:
246
300
  # initial_poll_delay:
247
301
  # seconds: 60 # 1 minute
@@ -299,6 +353,15 @@ module Google
299
353
 
300
354
  # Street View Org.
301
355
  STREET_VIEW = 4
356
+
357
+ # Shopping Org.
358
+ SHOPPING = 5
359
+
360
+ # Geo Org.
361
+ GEO = 6
362
+
363
+ # Generative AI - https://developers.generativeai.google
364
+ GENERATIVE_AI = 7
302
365
  end
303
366
 
304
367
  # To where should client libraries be published?
@@ -43,8 +43,12 @@ module Google
43
43
  # if (any.is(Foo.class)) {
44
44
  # foo = any.unpack(Foo.class);
45
45
  # }
46
+ # // or ...
47
+ # if (any.isSameTypeAs(Foo.getDefaultInstance())) {
48
+ # foo = any.unpack(Foo.getDefaultInstance());
49
+ # }
46
50
  #
47
- # Example 3: Pack and unpack a message in Python.
51
+ # Example 3: Pack and unpack a message in Python.
48
52
  #
49
53
  # foo = Foo(...)
50
54
  # any = Any()
@@ -54,7 +58,7 @@ module Google
54
58
  # any.Unpack(foo)
55
59
  # ...
56
60
  #
57
- # Example 4: Pack and unpack a message in Go
61
+ # Example 4: Pack and unpack a message in Go
58
62
  #
59
63
  # foo := &pb.Foo{...}
60
64
  # any, err := anypb.New(foo)
@@ -73,9 +77,8 @@ module Google
73
77
  # in the type URL, for example "foo.bar.com/x/y.z" will yield type
74
78
  # name "y.z".
75
79
  #
76
- #
77
80
  # JSON
78
- #
81
+ # ====
79
82
  # The JSON representation of an `Any` value uses the regular
80
83
  # representation of the deserialized, embedded message, with an
81
84
  # additional field `@type` which contains the type URL. Example:
@@ -69,7 +69,6 @@ module Google
69
69
  # Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
70
70
  # .setNanos((int) ((millis % 1000) * 1000000)).build();
71
71
  #
72
- #
73
72
  # Example 5: Compute Timestamp from Java `Instant.now()`.
74
73
  #
75
74
  # Instant now = Instant.now();
@@ -78,7 +77,6 @@ module Google
78
77
  # Timestamp.newBuilder().setSeconds(now.getEpochSecond())
79
78
  # .setNanos(now.getNano()).build();
80
79
  #
81
- #
82
80
  # Example 6: Compute Timestamp from current time in Python.
83
81
  #
84
82
  # timestamp = Timestamp()
@@ -108,7 +106,7 @@ module Google
108
106
  # [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
109
107
  # the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
110
108
  # the Joda Time's [`ISODateTimeFormat.dateTime()`](
111
- # http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
109
+ # http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
112
110
  # ) to obtain a formatter capable of generating timestamps in this format.
113
111
  # @!attribute [rw] seconds
114
112
  # @return [::Integer]
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: google-cloud-video_intelligence-v1
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.8.0
4
+ version: 0.9.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Google LLC
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2023-02-23 00:00:00.000000000 Z
11
+ date: 2023-05-31 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: gapic-common
@@ -16,7 +16,7 @@ dependencies:
16
16
  requirements:
17
17
  - - ">="
18
18
  - !ruby/object:Gem::Version
19
- version: 0.17.1
19
+ version: 0.19.0
20
20
  - - "<"
21
21
  - !ruby/object:Gem::Version
22
22
  version: 2.a
@@ -26,7 +26,7 @@ dependencies:
26
26
  requirements:
27
27
  - - ">="
28
28
  - !ruby/object:Gem::Version
29
- version: 0.17.1
29
+ version: 0.19.0
30
30
  - - "<"
31
31
  - !ruby/object:Gem::Version
32
32
  version: 2.a
@@ -219,5 +219,7 @@ requirements: []
219
219
  rubygems_version: 3.4.2
220
220
  signing_key:
221
221
  specification_version: 4
222
- summary: API Client library for the Cloud Video Intelligence V1 API
222
+ summary: Detects objects, explicit content, and scene changes in videos. It also specifies
223
+ the region for annotation and transcribes speech to text. Supports both asynchronous
224
+ API and streaming API.
223
225
  test_files: []